hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
965d829908550f4b15045cf43eff2461a5aae268.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Cuda accelerated motion estimation for VP8 libvpx encoder by Pietro Paglierani, Giuliano Grossi, Federico Pedersini and Alessandro Petrini for Italtel and Universita' degli Studi di Milano 2015-2016, Milano */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <wchar.h> #include <locale.h> #include "vpx_config.h" #include "cuda/typedef_cuda.h" #include "cuda/me_cuda.h" #ifdef __cplusplus extern "C" { #endif #if HAVE_CUDA_ENABLED_DEVICE __device__ __constant__ MV MV_16x12_lookup_split[] = { {-12,-2}, {-12, 0}, {-12, 2}, // Unit: pixel {-10,-5}, {-10,-3}, {-10,-1}, {-10, 1}, {-10, 3}, {-10, 5}, {-8,-8}, {-8,-6}, {-8,-4}, {-8,-2}, {-8, 0}, {-8, 2}, {-8, 4}, {-8, 6}, {-8, 8}, {-6,-9}, {-6,-7}, {-6,-5}, {-6,-3}, {-6,-1}, {-6, 1}, {-6, 3}, {-6, 5}, {-6, 7}, {-6, 9}, {-4,-12}, {-4,-10}, {-4,-8}, {-4,-6}, {-4,-4}, {-4,-2}, {-4, 0}, {-4, 2}, {-4, 4}, {-4, 6}, {-4, 8}, {-4,10}, {-4,12}, {-2,-13}, {-2,-11}, {-2,-9}, {-2,-7}, {-2,-5}, {-2,-3}, {-2,-1}, {-2, 1}, {-2, 3}, {-2, 5}, {-2, 7}, {-2, 9}, {-2,11}, {-2,13}, {0,-16}, {0,-14}, {0,-12}, {0,-10}, {0,-8}, {0,-6}, {0,-4}, {0,-2}, {0, 0}, {0, 2}, {0, 4}, {0, 6}, {0, 8}, {0,10}, {0,12}, {0,14}, {0,16}, {2,-13}, {2,-11}, {2,-9}, {2,-7}, {2,-5}, {2,-3}, {2,-1}, {2, 1}, {2, 3}, {2, 5}, {2, 7}, {2, 9}, {2,11}, {2,13}, {4,-12}, {4,-10}, {4,-8}, {4,-6}, {4,-4}, {4,-2}, {4, 0}, {4, 2}, {4, 4}, {4, 6}, {4, 8}, {4,10}, {4,12}, {6,-9}, {6,-7}, {6,-5}, {6,-3}, {6,-1}, {6, 1}, {6, 3}, {6, 5}, {6, 7}, {6, 9}, {8,-8}, {8,-6}, {8,-4}, {8,-2}, {8, 0}, {8, 2}, {8, 4}, {8, 6}, {8, 8}, {10,-5}, {10,-3}, {10,-1}, {10, 1}, {10, 3}, {10, 5}, {12,-2}, {12, 0}, {12, 2}, {0, 0} }; // 127 + 1 candidati __device__ __constant__ MV MV_lookup_refin_split[] = { {-2, 0} , // Unit: pixel {-1, -2}, {-1, -1}, {-1, 0}, {-1, 1}, {-1, 2}, { 0, -1}, { 0, 0}, { 0, 1}, { 1, -2}, { 1, -1}, { 1, 0}, { 1, 1}, { 1, 2}, { 2, 0}, {0, 0} }; // in piu', per arrivare a 
16 __constant__ int offset_16x12__[128]; __constant__ int offset_16x12_refin__[16]; void setup_constant_mem_split(int img_stride) { int I = img_stride; int off_16x12[] = { -12*I-2, -12*I, -12*I+2, // Offsets -10*I-5, -10*I-3, -10*I-1, -10*I+1, -10*I+3, -10*I+5, -8*I-8, -8*I-6, -8*I-4, -8*I-2, -8*I, -8*I+2, -8*I+4, -8*I+6, -8*I+8, -6*I-9, -6*I-7, -6*I-5, -6*I-3, -6*I-1, -6*I+1, -6*I+3, -6*I+5, -6*I+7, -6*I+9, -4*I-12, -4*I-10, -4*I-8, -4*I-6, -4*I-4, -4*I-2, -4*I, -4*I+2, -4*I+4, -4*I+6, -4*I+8, -4*I+10, -4*I+12, -2*I-13, -2*I-11, -2*I-9, -2*I-7, -2*I-5, -2*I-3, -2*I-1, -2*I+1, -2*I+3, -2*I+5, -2*I+7, -2*I+9, -2*I+11, -2*I+13, -16, -14, -12, -10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10, 12, 14, 16, 2*I-13, 2*I-11, 2*I-9, 2*I-7, 2*I-5, 2*I-3, 2*I-1, 2*I+1, 2*I+3, 2*I+5, 2*I+7, 2*I+9, 2*I+11, 2*I+13, 4*I-12, 4*I-10, 4*I-8, 4*I-6, 4*I-4, 4*I-2, 4*I, 4*I+2, 4*I+4, 4*I+6, 4*I+8, 4*I+10, 4*I+12, 6*I-9, 6*I-7, 6*I-5, 6*I-3, 6*I-1, 6*I+1, 6*I+3, 6*I+5, 6*I+7, 6*I+9, 8*I-8, 8*I-6, 8*I-4, 8*I-2, 8*I, 8*I+2, 8*I+4, 8*I+6, 8*I+8, 10*I-5, 10*I-3, 10*I-1, 10*I+1, 10*I+3, 10*I+5, 12*I-2, 12*I, 12*I+2, 0 }; // in piu', per arrivare a 128 int off_16x12_refin[] = { -2*I , -I-2, -I-1, -I, -I+1, -I+2, -1, 0, 1, -I-2, I-1, I, I+1, I+2, 2*I, 0 }; // in piu', per arrivare a 16 // copy to device constant memory (hipMemcpyToSymbol(offset_16x12__, off_16x12, 128*sizeof(int))); (hipMemcpyToSymbol(offset_16x12_refin__, off_16x12_refin, 16*sizeof(int))); } __inline__ __device__ uint32_t __vabsdiff4( uint32_t u, uint32_t v ) { uint32_t w = 0; asm volatile("vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(w) : "r"(u), "r"(v), "r"(w)); return w; } __global__ void me_cuda_split (const uint8_t * __restrict__ const in_frame, const uint8_t * __restrict__ const ref_frame, int const streamID, int const streamSize, int const stride, int const width, int const num_MB_width, int const split_on, int_mv * __restrict__ const MVs_g, int_mv * __restrict__ const MVs_split_g ) { __shared__ uint16_t diff[128][32]; // 
Risky! It might overflow in one pathologic instance __shared__ uint8_t minpos[32]; __shared__ uint8_t minpos_refin[32]; // configurazione di lancio: blocks per grid: 16 x 1 x 1 // threads per block: 4 x 8 x 1 int32_t TID = threadIdx.y * blockDim.x + threadIdx.x; // Thread Index (0..32) int32_t i, j; int32_t MBoffset = streamID * streamSize + blockIdx.x; int32_t blockX = MBoffset % num_MB_width; // colonna int32_t blockY = MBoffset / num_MB_width; // riga // Occhio: immagine di riferimento ha cornice (larghezza tot = stride) mentre immagine input no (largh tot = width) int32_t im_offset = 16 * (blockY * stride + blockX) + (2 * threadIdx.y * stride + 4 * threadIdx.x) + 32 * (stride + 1); int32_t im_offset_raw = 16 * (blockY * width + blockX) + (2 * threadIdx.y * width + 4 * threadIdx.x); const uint8_t *refptr = ref_frame + im_offset; const uint8_t *imptr = in_frame + im_offset_raw; uint32_t delta_img = (1 * width); uint32_t delta_ref = (1 * stride); uint32_t img0 = (uint32_t) ( (*(imptr + 3) << 24) | (*(imptr + 2) << 16) | (*(imptr + 1) << 8) | *(imptr) ); uint32_t img1 = (uint32_t) ( (*(imptr + delta_img + 3) << 24) | (*(imptr + delta_img + 2) << 16) | (*(imptr + delta_img + 1) << 8) | *(imptr + delta_img) ); uint32_t ref0, ref1; /* Organizzazione dei thread all'interno del macroblocco. Ogni thread considera 4 pixel e i 4 immediatamente sottostanti. Accesso a memoria globale non e' ottimale (coalescenza a gruppi di quattro), ma questo schema permette di raggruppare le sad in somme parziali per calcolare tutte le splitmv. TID 0 TID 1 TID 2 TID 3 TID 4 TID 5 TID 6 TID 7 TID 9 TID 9 TID 10 TID 11 TID 12 TID 13 TID 14 TID 15 TID 16 TID 17 TID 18 TID 19 TID 20 TID 21 TID 22 TID 23 TID 24 TID 25 TID 26 TID 27 TID 28 TID 29 TID 30 TID 31 */ /* Calcolo delle sad, risultati memorizzati nella matrice diff. 
32 32 TID = 32 sotto blocchi, ognuno contenente sad parziali / \ diff[128][32] 128 candidati mv Ogni thread si fa carico si un sottoblocco di 8 pixel e calcola la sad per ogni candidato mv */ for (i = 0; i < 128; i++){ const uint8_t *refp = refptr + offset_16x12__[i]; int32_t sad_result; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); // Accumulazione delle colonne di diff in modo da formare sad di blocchi per ogni candidato mv // Prima reduction, generazione 16 sad 4x4 // 0 1 2 3 | 8 9 10 11 | 16 17 18 19 | 24 25 26 27 <- j // ^ ^ ^ ^ | ^ ^ ^ ^ | ^ ^ ^ ^ | ^ ^ ^ ^ // 4 5 6 7 | 12 13 14 15 | 20 21 22 23 | 28 29 30 31 <- j + 4 for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID ][j] += diff[TID ][j+4]; diff[TID+32][j] += diff[TID+32][j+4]; diff[TID+64][j] += diff[TID+64][j+4]; diff[TID+96][j] += diff[TID+96][j+4]; } __syncthreads(); // Seconda reduction, generazione 4 sad 8x8 // 4 | 12 | 20 | 28 <- (8 * i) + 4 // ^ | ^ | ^ | ^ // 0 1 8 9 | 2 3 10 11 | 16 17 24 25 | 18 19 26 27 <- [j j+1 j+8 j+9] for (i = 0; i < 4; i++) { j = 2 * i + (i / 2) * 12; // genera 0, 2, 16, 18 per i = 0 .. 
3 diff[TID ][(8 * i) + 4] = diff[TID ][j] + diff[TID ][j + 1] + diff[TID ][j + 8] + diff[TID ][j + 9]; diff[TID+32][(8 * i) + 4] = diff[TID+32][j] + diff[TID+32][j + 1] + diff[TID+32][j + 8] + diff[TID+32][j + 9]; diff[TID+64][(8 * i) + 4] = diff[TID+64][j] + diff[TID+64][j + 1] + diff[TID+64][j + 8] + diff[TID+64][j + 9]; diff[TID+96][(8 * i) + 4] = diff[TID+96][j] + diff[TID+96][j + 1] + diff[TID+96][j + 8] + diff[TID+96][j + 9]; } __syncthreads(); // Terza reduction (a), generazione 2 sad 8x16 // 8x16 // 22 | 30 <- 22 + (i * 8) // ^ | ^ // 4 20 | 12 28 for (i = 0; i < 2; i++) { j = 4 + (8 * i); // genera 4, 12 per i = 0..1 diff[TID ][22 + (i * 8)] = diff[TID ][j] + diff[TID ][j + 16]; diff[TID+32][22 + (i * 8)] = diff[TID+32][j] + diff[TID+32][j + 16]; diff[TID+64][22 + (i * 8)] = diff[TID+64][j] + diff[TID+64][j + 16]; diff[TID+96][22 + (i * 8)] = diff[TID+96][j] + diff[TID+96][j + 16]; } __syncthreads(); // potrebbe non servire! // Terza reduction (b), generazione 2 sad 16x8 // 16x8 // 6 | 14 <- 6*(i+1) + 2*i = 8 * i + 6 // ^ | ^ // 4 12 | 20 28 <- [j j+8] for (i = 0; i < 2; i++) { j = 4 + (16 * i); // genera 4, 20 per i = 0..1 diff[TID ][8 * i + 6] = diff[TID ][j] + diff[TID ][j + 8]; diff[TID+32][8 * i + 6] = diff[TID+32][j] + diff[TID+32][j + 8]; diff[TID+64][8 * i + 6] = diff[TID+64][j] + diff[TID+64][j + 8]; diff[TID+96][8 * i + 6] = diff[TID+96][j] + diff[TID+96][j + 8]; } __syncthreads(); // Quarta reduction, generazione 1 sad 16x16 // 31 // ^ // 6 14 diff[TID ][31] = diff[TID ][6] + diff[TID ][14]; diff[TID+32][31] = diff[TID+32][6] + diff[TID+32][14]; diff[TID+64][31] = diff[TID+64][6] + diff[TID+64][14]; diff[TID+96][31] = diff[TID+96][6] + diff[TID+96][14]; __syncthreads(); // Ricerca del minimo di ogni colonna. 
A noi interessano 25 delle 32 colonne, // ma per non creare divergenza tra i thread eseguiamo la ricerca anche dove non serve minpos[TID] = 0; __syncthreads(); // 32 thread, ognuno ricerca il minimo lungo una colonna for( i = 1; i < 128; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos[TID] = i; } } // Salva mv 16x16 // Questo potrebbe essere fatto meglio, conj 25 thread che lavorano contemporaneamente, // ma devo studiare come indicizzare l'accesso alla matrice globale. if ( TID == 31 ) { MVs_g[MBoffset].as_mv.row = MV_16x12_lookup_split[ minpos[TID] ].row * 8; MVs_g[MBoffset].as_mv.col = MV_16x12_lookup_split[ minpos[TID] ].col * 8; } if (split_on == SPLITMV_ON) { // salva mv 4x4 if ( TID < 16 ) { MVs_split_g[MBoffset*24 + TID].as_mv.row = MV_16x12_lookup_split[ minpos[TID + (TID / 4) * 4] ].row * 8; MVs_split_g[MBoffset*24 + TID].as_mv.col = MV_16x12_lookup_split[ minpos[TID + (TID / 4) * 4] ].col * 8; } // salva mv 8x8 if ( TID < 4 ) { MVs_split_g[MBoffset*24 + 16 + TID].as_mv.row = MV_16x12_lookup_split[ minpos[8 * TID + 4] ].row * 8; MVs_split_g[MBoffset*24 + 16 + TID].as_mv.col = MV_16x12_lookup_split[ minpos[8 * TID + 4] ].col * 8; } // salva mv 8x16 e 16x8 if ( TID < 2 ) { MVs_split_g[MBoffset*24 + 20 + TID].as_mv.row = MV_16x12_lookup_split[ minpos[8 * TID + 22] ].row * 8; MVs_split_g[MBoffset*24 + 22 + TID].as_mv.row = MV_16x12_lookup_split[ minpos[8 * TID + 6] ].row * 8; MVs_split_g[MBoffset*24 + 20 + TID].as_mv.col = MV_16x12_lookup_split[ minpos[8 * TID + 22] ].col * 8; MVs_split_g[MBoffset*24 + 22 + TID].as_mv.col = MV_16x12_lookup_split[ minpos[8 * TID + 6] ].col * 8; } __syncthreads(); } /////////////////////////////////////////////////////////////////////////////////////////// // STEP 2: pixel-scale Motion Vector Search // 1. // Ricerca di un MV per ogni blocco 4x4 // 16 blocchi, 2 thread per blocco. Stesso schema per decidere TID => thread 0 e 4 fanno 1 blocco; 1 e 5 il secondo, ecc... 
// Risultati sad memorizzati in diff[i][TID] con 0 < i < 15 // Questa volta non possiamo piu' sfruttare che refptr punti alla stesso indice, quindi posso // calcolare contemporaneamente ogni sad per tid e accumulare, ma posso sfruttare il // parallelismo tra mv dello stesso tipo: prima calcolo in parall tutte le 4x4, poi le 8x8, ecc... if (split_on == SPLITMV_ON) { // Update refpointer al miglior mv j = (TID % 4) + (TID / 8) * 8; // Genera 0 1 2 3 0 1 2 3 8 9 10 11 8 9 10 11 16 17... // perche' TID 0 e 4 vengono traslati dello stesso mv corrispondente // a quello ora presente in colonna 0 di minpos refptr += offset_16x12__[minpos[j]]; for (i = 0; i < 16; i++) { const uint8_t *refp = refptr + offset_16x12_refin__[i]; int32_t sad_result; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); if ( TID < 16 ) { for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID][j] += diff[TID][j+4]; } } minpos_refin[TID] = 0; __syncthreads(); for( i = 1; i < 16; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos_refin[TID] = i; } } // salva MV della split 4x4 if ( TID < 16 ) { MVs_split_g[MBoffset*24 + TID].as_mv.row += (MV_lookup_refin_split[ minpos_refin[TID + (TID / 4) * 4] ].row * 8); MVs_split_g[MBoffset*24 + TID].as_mv.col += (MV_lookup_refin_split[ minpos_refin[TID + (TID / 4) * 4] ].col * 8); } // 2. // Ricerca di un mv per ogni blocco 8x8 // Procedura esattamente identica alla precedente: TID che elaborano stesso blocco avranno // mv impostato coerentemente. Differente accumulazione (per blocco 0: TID 0 1 4 5 8 9 12 13) // Update refpointer al miglior mv j = (TID / 8) * 8 + 4; // Genera 4 4 4 4 4 4 4 4 12 12 12 12 12 12 12 12 20 20 20 20... 
refptr = ref_frame + im_offset + offset_16x12__[ minpos[j] ]; for (i = 0; i < 16; i++) { const uint8_t *refp = refptr + offset_16x12_refin__[i]; int32_t sad_result; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); // Sono pigro, copio e incollo la stessa manfrina if ( TID < 16 ) { for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID][j] += diff[TID][j+4]; } } __syncthreads(); if ( TID < 16 ) { for (i = 0; i < 4; i++) { j = 2 * i + (i / 2) * 12; diff[TID][(8 * i) + 4] = diff[TID][j] + diff[TID][j + 1] + diff[TID][j + 8] + diff[TID][j + 9]; } } __syncthreads(); minpos_refin[TID] = 0; __syncthreads(); // 32 thread, ognuno ricerca il minimo lungo ogni colonna // anche se le colonne interessanti sono solo la 4, 12, 20 e 28 for( i = 1; i < 16; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos_refin[TID] = i; } } __syncthreads(); // Salva i MV della split 8x8 if ( TID < 4 ) { MVs_split_g[MBoffset*24 + 16 + TID].as_mv.row += (MV_lookup_refin_split[ minpos_refin[8 * TID + 4] ].row * 8); MVs_split_g[MBoffset*24 + 16 + TID].as_mv.col += (MV_lookup_refin_split[ minpos_refin[8 * TID + 4] ].col * 8); } } // 5. 
// Refining search su blocco 16x16 // Update RefPointer to the best motion vector refptr = ref_frame + im_offset + offset_16x12__[ minpos[31] ]; for (i = 0; i < 16; i++) { const uint8_t *refp = refptr + offset_16x12_refin__[i]; int32_t sad_result; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); for (i=0; i<16; i++) diff[TID][i] += diff[TID][i+16]; __syncthreads(); for (i=0; i<8; i++) diff[TID][i] += diff[TID][i+8]; __syncthreads(); for (i=0; i<4; i++) diff[TID][i] += diff[TID][i+4]; __syncthreads(); diff[TID][0] += (diff[TID][1] + diff[TID][2] + diff[TID][3]); __syncthreads(); minpos[TID] = TID; __syncthreads(); // qui posso lasciare il vecchio modo di trovare il minimo, funziona lo stesso if( TID < 8 ) if( diff[TID][0] > diff[TID+8][0] ) { diff[TID][0] = diff[TID+8][0]; minpos[TID] = minpos[TID+8]; } __syncthreads(); if( TID < 4 ) if( diff[TID][0] > diff[TID + 4][0] ) { diff[TID][0] = diff[TID + 4][0]; minpos[TID] = minpos[TID + 4]; } __syncthreads(); if( TID < 2 ) if( diff[TID][0] > diff[TID + 2][0] ) { diff[TID][0] = diff[TID + 2][0]; minpos[TID] = minpos[TID + 2]; } __syncthreads(); if( TID == 0 ) { if( diff[0][0] > diff[1][0] ) { diff[0][0] = diff[1][0]; minpos[0] = minpos[1]; } MVs_g[MBoffset].as_mv.row += (MV_lookup_refin_split[ minpos[0] ].row * 8); MVs_g[MBoffset].as_mv.col += (MV_lookup_refin_split[ minpos[0] ].col * 8); } } void me_kernel_launch_split( VP8_COMMON * const common, const uint8_t * const in_frame, const uint8_t * const ref_frame, int const streamID, int const split_on, int_mv * const MVs, int_mv * const MVs_split ) { #if CUDA_VERBOSE float elapsedTime; hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); 
CHECK(hipEventRecord(start)); #endif hipLaunchKernelGGL(( me_cuda_split) , dim3(common->GPU.gridDim), dim3(common->GPU.blockDim), 0, common->GPU.streams.frame[streamID] , in_frame, ref_frame, streamID, common->GPU.streamSize, common->gpu_frame.stride, common->gpu_frame.width, common->gpu_frame.num_MB_width, split_on, MVs, MVs_split ); #if CUDA_VERBOSE CHECK(hipEventRecord(stop)); CHECK(hipEventSynchronize(stop)); CHECK(hipEventElapsedTime(&elapsedTime, start, stop)); printf("\n[GPU] ME elapsed time streams[%d]: %.4f ms\n",streamID,elapsedTime); CHECK(hipEventDestroy(start)); CHECK(hipEventDestroy(stop)); add_STATS((double)elapsedTime,0); #endif } void me_cuda_launch_interleaved_split( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) { //int MV_size_16 = 16*sizeof(int_mv); int MV_size_16 = cm->GPU.streamSize * sizeof(int_mv); // for printing informations about reference frame flags and thei usage, I left a commented prinft at line 3625 // at the beginning of encode_frame_to_data_rate(..) 
in onyx_if.c for (int t = 0; t < cm->GPU.num_mb16th; t++) { int s = cm->GPU.streamLaunchOrder[t]; //int offset = 16*s; int offset = cm->GPU.streamSize * s; // bugfix per immagini il cui n di mb non e' divisibile per 16 // prima venivano lanciati troppi processi e hipMemcpyAsync andava a leggere oltre i limiti degli array if (offset + cm->GPU.streamSize > cm->gpu_frame.num_mv) MV_size_16 = ( offset + cm->GPU.streamSize - cm->gpu_frame.num_mv ) * sizeof( int_mv ); if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, SPLITMV_ON, (cm->gpu_frame.MVs_g)[0], cm->gpu_frame.MVs_split_g ); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_split_h)[offset],&(cm->gpu_frame.MVs_split_g)[offset],24 * MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } // Se ref_frame_flags indica la presenza di un gold e se il flag del fb puntato da gld_fb_idx indica che e' gold, allora... if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, SPLITMV_OFF, (cm->gpu_frame.MVs_g)[1], 0 ); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } // Se ref_frame_flags indica la presenza di un altref e se il flag del fb puntato da alt_fb_idx indica che e' altref, allora... 
if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, SPLITMV_OFF, (cm->gpu_frame.MVs_g)[2], 0 ); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } } } void me_cuda_launch_not_interleaved_split( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) { //int MV_size_16 = 16*sizeof(int_mv); int MV_size_16 = cm->GPU.streamSize * sizeof(int_mv); // for printing informations about reference frame flags and thei usage, I left a commented prinft at line 3625 // at the beginning of encode_frame_to_data_rate(..) in onyx_if.c for (int t = 0; t < cm->GPU.num_mb16th; t++) { int s = cm->GPU.streamLaunchOrder[t]; //int offset = 16*s; int offset = cm->GPU.streamSize * s; // bugfix per immagini il cui n di mb non e' divisibile per 16 // prima venivano lanciati troppi processi e hipMemcpyAsync andava a leggere oltre i limiti degli array if (offset + cm->GPU.streamSize > cm->gpu_frame.num_mv) MV_size_16 = ( offset + cm->GPU.streamSize - cm->gpu_frame.num_mv ) * sizeof( int_mv ); if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, SPLITMV_ON, (cm->gpu_frame.MVs_g)[0], cm->gpu_frame.MVs_split_g ); // Se ref_frame_flags indica la presenza di un gold e se il flag del fb puntato da gld_fb_idx indica che e' gold, allora... 
if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, SPLITMV_OFF, (cm->gpu_frame.MVs_g)[1], 0 ); // Se ref_frame_flags indica la presenza di un altref e se il flag del fb puntato da alt_fb_idx indica che e' altref, allora... if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, SPLITMV_OFF, (cm->gpu_frame.MVs_g)[2], 0 ); if (ref_frame_flags & GPUFLAG_LAST_FRAME) { CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16,hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_split_h)[offset],&(cm->gpu_frame.MVs_split_g)[offset],24 * MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } } } #endif /* HAVE_CUDA_ENABLED_DEVICE */ #ifdef __cplusplus } #endif
965d829908550f4b15045cf43eff2461a5aae268.cu
/* Cuda accelerated motion estimation for VP8 libvpx encoder by Pietro Paglierani, Giuliano Grossi, Federico Pedersini and Alessandro Petrini for Italtel and Universita' degli Studi di Milano 2015-2016, Milano */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <wchar.h> #include <locale.h> #include "vpx_config.h" #include "cuda/typedef_cuda.h" #include "cuda/me_cuda.h" #ifdef __cplusplus extern "C" { #endif #if HAVE_CUDA_ENABLED_DEVICE __device__ __constant__ MV MV_16x12_lookup_split[] = { {-12,-2}, {-12, 0}, {-12, 2}, // Unit: pixel {-10,-5}, {-10,-3}, {-10,-1}, {-10, 1}, {-10, 3}, {-10, 5}, {-8,-8}, {-8,-6}, {-8,-4}, {-8,-2}, {-8, 0}, {-8, 2}, {-8, 4}, {-8, 6}, {-8, 8}, {-6,-9}, {-6,-7}, {-6,-5}, {-6,-3}, {-6,-1}, {-6, 1}, {-6, 3}, {-6, 5}, {-6, 7}, {-6, 9}, {-4,-12}, {-4,-10}, {-4,-8}, {-4,-6}, {-4,-4}, {-4,-2}, {-4, 0}, {-4, 2}, {-4, 4}, {-4, 6}, {-4, 8}, {-4,10}, {-4,12}, {-2,-13}, {-2,-11}, {-2,-9}, {-2,-7}, {-2,-5}, {-2,-3}, {-2,-1}, {-2, 1}, {-2, 3}, {-2, 5}, {-2, 7}, {-2, 9}, {-2,11}, {-2,13}, {0,-16}, {0,-14}, {0,-12}, {0,-10}, {0,-8}, {0,-6}, {0,-4}, {0,-2}, {0, 0}, {0, 2}, {0, 4}, {0, 6}, {0, 8}, {0,10}, {0,12}, {0,14}, {0,16}, {2,-13}, {2,-11}, {2,-9}, {2,-7}, {2,-5}, {2,-3}, {2,-1}, {2, 1}, {2, 3}, {2, 5}, {2, 7}, {2, 9}, {2,11}, {2,13}, {4,-12}, {4,-10}, {4,-8}, {4,-6}, {4,-4}, {4,-2}, {4, 0}, {4, 2}, {4, 4}, {4, 6}, {4, 8}, {4,10}, {4,12}, {6,-9}, {6,-7}, {6,-5}, {6,-3}, {6,-1}, {6, 1}, {6, 3}, {6, 5}, {6, 7}, {6, 9}, {8,-8}, {8,-6}, {8,-4}, {8,-2}, {8, 0}, {8, 2}, {8, 4}, {8, 6}, {8, 8}, {10,-5}, {10,-3}, {10,-1}, {10, 1}, {10, 3}, {10, 5}, {12,-2}, {12, 0}, {12, 2}, {0, 0} }; // 127 + 1 candidati __device__ __constant__ MV MV_lookup_refin_split[] = { {-2, 0} , // Unit: pixel {-1, -2}, {-1, -1}, {-1, 0}, {-1, 1}, {-1, 2}, { 0, -1}, { 0, 0}, { 0, 1}, { 1, -2}, { 1, -1}, { 1, 0}, { 1, 1}, { 1, 2}, { 2, 0}, {0, 0} }; // in piu', per arrivare a 16 __constant__ int offset_16x12__[128]; __constant__ int offset_16x12_refin__[16]; void 
setup_constant_mem_split(int img_stride) { int I = img_stride; int off_16x12[] = { -12*I-2, -12*I, -12*I+2, // Offsets -10*I-5, -10*I-3, -10*I-1, -10*I+1, -10*I+3, -10*I+5, -8*I-8, -8*I-6, -8*I-4, -8*I-2, -8*I, -8*I+2, -8*I+4, -8*I+6, -8*I+8, -6*I-9, -6*I-7, -6*I-5, -6*I-3, -6*I-1, -6*I+1, -6*I+3, -6*I+5, -6*I+7, -6*I+9, -4*I-12, -4*I-10, -4*I-8, -4*I-6, -4*I-4, -4*I-2, -4*I, -4*I+2, -4*I+4, -4*I+6, -4*I+8, -4*I+10, -4*I+12, -2*I-13, -2*I-11, -2*I-9, -2*I-7, -2*I-5, -2*I-3, -2*I-1, -2*I+1, -2*I+3, -2*I+5, -2*I+7, -2*I+9, -2*I+11, -2*I+13, -16, -14, -12, -10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10, 12, 14, 16, 2*I-13, 2*I-11, 2*I-9, 2*I-7, 2*I-5, 2*I-3, 2*I-1, 2*I+1, 2*I+3, 2*I+5, 2*I+7, 2*I+9, 2*I+11, 2*I+13, 4*I-12, 4*I-10, 4*I-8, 4*I-6, 4*I-4, 4*I-2, 4*I, 4*I+2, 4*I+4, 4*I+6, 4*I+8, 4*I+10, 4*I+12, 6*I-9, 6*I-7, 6*I-5, 6*I-3, 6*I-1, 6*I+1, 6*I+3, 6*I+5, 6*I+7, 6*I+9, 8*I-8, 8*I-6, 8*I-4, 8*I-2, 8*I, 8*I+2, 8*I+4, 8*I+6, 8*I+8, 10*I-5, 10*I-3, 10*I-1, 10*I+1, 10*I+3, 10*I+5, 12*I-2, 12*I, 12*I+2, 0 }; // in piu', per arrivare a 128 int off_16x12_refin[] = { -2*I , -I-2, -I-1, -I, -I+1, -I+2, -1, 0, 1, -I-2, I-1, I, I+1, I+2, 2*I, 0 }; // in piu', per arrivare a 16 // copy to device constant memory (cudaMemcpyToSymbol(offset_16x12__, off_16x12, 128*sizeof(int))); (cudaMemcpyToSymbol(offset_16x12_refin__, off_16x12_refin, 16*sizeof(int))); } __inline__ __device__ uint32_t __vabsdiff4( uint32_t u, uint32_t v ) { uint32_t w = 0; asm volatile("vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(w) : "r"(u), "r"(v), "r"(w)); return w; } __global__ void me_cuda_split (const uint8_t * __restrict__ const in_frame, const uint8_t * __restrict__ const ref_frame, int const streamID, int const streamSize, int const stride, int const width, int const num_MB_width, int const split_on, int_mv * __restrict__ const MVs_g, int_mv * __restrict__ const MVs_split_g ) { __shared__ uint16_t diff[128][32]; // Risky! 
It might overflow in one pathologic instance __shared__ uint8_t minpos[32]; __shared__ uint8_t minpos_refin[32]; // configurazione di lancio: blocks per grid: 16 x 1 x 1 // threads per block: 4 x 8 x 1 int32_t TID = threadIdx.y * blockDim.x + threadIdx.x; // Thread Index (0..32) int32_t i, j; int32_t MBoffset = streamID * streamSize + blockIdx.x; int32_t blockX = MBoffset % num_MB_width; // colonna int32_t blockY = MBoffset / num_MB_width; // riga // Occhio: immagine di riferimento ha cornice (larghezza tot = stride) mentre immagine input no (largh tot = width) int32_t im_offset = 16 * (blockY * stride + blockX) + (2 * threadIdx.y * stride + 4 * threadIdx.x) + 32 * (stride + 1); int32_t im_offset_raw = 16 * (blockY * width + blockX) + (2 * threadIdx.y * width + 4 * threadIdx.x); const uint8_t *refptr = ref_frame + im_offset; const uint8_t *imptr = in_frame + im_offset_raw; uint32_t delta_img = (1 * width); uint32_t delta_ref = (1 * stride); uint32_t img0 = (uint32_t) ( (*(imptr + 3) << 24) | (*(imptr + 2) << 16) | (*(imptr + 1) << 8) | *(imptr) ); uint32_t img1 = (uint32_t) ( (*(imptr + delta_img + 3) << 24) | (*(imptr + delta_img + 2) << 16) | (*(imptr + delta_img + 1) << 8) | *(imptr + delta_img) ); uint32_t ref0, ref1; /* Organizzazione dei thread all'interno del macroblocco. Ogni thread considera 4 pixel e i 4 immediatamente sottostanti. Accesso a memoria globale non e' ottimale (coalescenza a gruppi di quattro), ma questo schema permette di raggruppare le sad in somme parziali per calcolare tutte le splitmv. 
╔══════════╦══════════╦══════════╦══════════╗ ║ TID 0 ║ TID 1 ║ TID 2 ║ TID 3 ║ ╟──────────╫──────────╫──────────╫──────────╢ ║ TID 4 ║ TID 5 ║ TID 6 ║ TID 7 ║ ╠══════════╬══════════╬══════════╬══════════╣ ║ TID 9 ║ TID 9 ║ TID 10 ║ TID 11 ║ ╟──────────╫──────────╫──────────╫──────────╢ ║ TID 12 ║ TID 13 ║ TID 14 ║ TID 15 ║ ╠══════════╬══════════╬══════════╬══════════╣ ║ TID 16 ║ TID 17 ║ TID 18 ║ TID 19 ║ ╟──────────╫──────────╫──────────╫──────────╢ ║ TID 20 ║ TID 21 ║ TID 22 ║ TID 23 ║ ╠══════════╬══════════╬══════════╬══════════╣ ║ TID 24 ║ TID 25 ║ TID 26 ║ TID 27 ║ ╟──────────╫──────────╫──────────╫──────────╢ ║ TID 28 ║ TID 29 ║ TID 30 ║ TID 31 ║ ╚══════════╩══════════╩══════════╩══════════╝ */ /* Calcolo delle sad, risultati memorizzati nella matrice diff. 32 32 TID = 32 sotto blocchi, ognuno contenente sad parziali / \ ┌───────────────┐ │ │ │ │ │ │ │ diff[128][32] │ 128 candidati mv │ │ │ │ │ │ └───────────────┘ Ogni thread si fa carico si un sottoblocco di 8 pixel e calcola la sad per ogni candidato mv */ for (i = 0; i < 128; i++){ const uint8_t *refp = refptr + offset_16x12__[i]; int32_t sad_result; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); // Accumulazione delle colonne di diff in modo da formare sad di blocchi per ogni candidato mv // Prima reduction, generazione 16 sad 4x4 // 0 1 2 3 | 8 9 10 11 | 16 17 18 19 | 24 25 26 27 <- j // ^ ^ ^ ^ | ^ ^ ^ ^ | ^ ^ ^ ^ | ^ ^ ^ ^ // 4 5 6 7 | 12 13 14 15 | 20 21 22 23 | 28 29 30 31 <- j + 4 for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID ][j] += diff[TID ][j+4]; diff[TID+32][j] += diff[TID+32][j+4]; diff[TID+64][j] += diff[TID+64][j+4]; diff[TID+96][j] += diff[TID+96][j+4]; } __syncthreads(); // 
Seconda reduction, generazione 4 sad 8x8 // 4 | 12 | 20 | 28 <- (8 * i) + 4 // ^ | ^ | ^ | ^ // 0 1 8 9 | 2 3 10 11 | 16 17 24 25 | 18 19 26 27 <- [j j+1 j+8 j+9] for (i = 0; i < 4; i++) { j = 2 * i + (i / 2) * 12; // genera 0, 2, 16, 18 per i = 0 .. 3 diff[TID ][(8 * i) + 4] = diff[TID ][j] + diff[TID ][j + 1] + diff[TID ][j + 8] + diff[TID ][j + 9]; diff[TID+32][(8 * i) + 4] = diff[TID+32][j] + diff[TID+32][j + 1] + diff[TID+32][j + 8] + diff[TID+32][j + 9]; diff[TID+64][(8 * i) + 4] = diff[TID+64][j] + diff[TID+64][j + 1] + diff[TID+64][j + 8] + diff[TID+64][j + 9]; diff[TID+96][(8 * i) + 4] = diff[TID+96][j] + diff[TID+96][j + 1] + diff[TID+96][j + 8] + diff[TID+96][j + 9]; } __syncthreads(); // Terza reduction (a), generazione 2 sad 8x16 // 8x16 // 22 | 30 <- 22 + (i * 8) // ^ | ^ // 4 20 | 12 28 for (i = 0; i < 2; i++) { j = 4 + (8 * i); // genera 4, 12 per i = 0..1 diff[TID ][22 + (i * 8)] = diff[TID ][j] + diff[TID ][j + 16]; diff[TID+32][22 + (i * 8)] = diff[TID+32][j] + diff[TID+32][j + 16]; diff[TID+64][22 + (i * 8)] = diff[TID+64][j] + diff[TID+64][j + 16]; diff[TID+96][22 + (i * 8)] = diff[TID+96][j] + diff[TID+96][j + 16]; } __syncthreads(); // potrebbe non servire! 
// Terza reduction (b), generazione 2 sad 16x8 // 16x8 // 6 | 14 <- 6*(i+1) + 2*i = 8 * i + 6 // ^ | ^ // 4 12 | 20 28 <- [j j+8] for (i = 0; i < 2; i++) { j = 4 + (16 * i); // genera 4, 20 per i = 0..1 diff[TID ][8 * i + 6] = diff[TID ][j] + diff[TID ][j + 8]; diff[TID+32][8 * i + 6] = diff[TID+32][j] + diff[TID+32][j + 8]; diff[TID+64][8 * i + 6] = diff[TID+64][j] + diff[TID+64][j + 8]; diff[TID+96][8 * i + 6] = diff[TID+96][j] + diff[TID+96][j + 8]; } __syncthreads(); // Quarta reduction, generazione 1 sad 16x16 // 31 // ^ // 6 14 diff[TID ][31] = diff[TID ][6] + diff[TID ][14]; diff[TID+32][31] = diff[TID+32][6] + diff[TID+32][14]; diff[TID+64][31] = diff[TID+64][6] + diff[TID+64][14]; diff[TID+96][31] = diff[TID+96][6] + diff[TID+96][14]; __syncthreads(); // Ricerca del minimo di ogni colonna. A noi interessano 25 delle 32 colonne, // ma per non creare divergenza tra i thread eseguiamo la ricerca anche dove non serve minpos[TID] = 0; __syncthreads(); // 32 thread, ognuno ricerca il minimo lungo una colonna for( i = 1; i < 128; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos[TID] = i; } } // Salva mv 16x16 // Questo potrebbe essere fatto meglio, conj 25 thread che lavorano contemporaneamente, // ma devo studiare come indicizzare l'accesso alla matrice globale. 
if ( TID == 31 ) { MVs_g[MBoffset].as_mv.row = MV_16x12_lookup_split[ minpos[TID] ].row * 8; MVs_g[MBoffset].as_mv.col = MV_16x12_lookup_split[ minpos[TID] ].col * 8; } if (split_on == SPLITMV_ON) { // salva mv 4x4 if ( TID < 16 ) { MVs_split_g[MBoffset*24 + TID].as_mv.row = MV_16x12_lookup_split[ minpos[TID + (TID / 4) * 4] ].row * 8; MVs_split_g[MBoffset*24 + TID].as_mv.col = MV_16x12_lookup_split[ minpos[TID + (TID / 4) * 4] ].col * 8; } // salva mv 8x8 if ( TID < 4 ) { MVs_split_g[MBoffset*24 + 16 + TID].as_mv.row = MV_16x12_lookup_split[ minpos[8 * TID + 4] ].row * 8; MVs_split_g[MBoffset*24 + 16 + TID].as_mv.col = MV_16x12_lookup_split[ minpos[8 * TID + 4] ].col * 8; } // salva mv 8x16 e 16x8 if ( TID < 2 ) { MVs_split_g[MBoffset*24 + 20 + TID].as_mv.row = MV_16x12_lookup_split[ minpos[8 * TID + 22] ].row * 8; MVs_split_g[MBoffset*24 + 22 + TID].as_mv.row = MV_16x12_lookup_split[ minpos[8 * TID + 6] ].row * 8; MVs_split_g[MBoffset*24 + 20 + TID].as_mv.col = MV_16x12_lookup_split[ minpos[8 * TID + 22] ].col * 8; MVs_split_g[MBoffset*24 + 22 + TID].as_mv.col = MV_16x12_lookup_split[ minpos[8 * TID + 6] ].col * 8; } __syncthreads(); } /////////////////////////////////////////////////////////////////////////////////////////// // STEP 2: pixel-scale Motion Vector Search // 1. // Ricerca di un MV per ogni blocco 4x4 // 16 blocchi, 2 thread per blocco. Stesso schema per decidere TID => thread 0 e 4 fanno 1 blocco; 1 e 5 il secondo, ecc... // Risultati sad memorizzati in diff[i][TID] con 0 < i < 15 // Questa volta non possiamo piu' sfruttare che refptr punti alla stesso indice, quindi posso // calcolare contemporaneamente ogni sad per tid e accumulare, ma posso sfruttare il // parallelismo tra mv dello stesso tipo: prima calcolo in parall tutte le 4x4, poi le 8x8, ecc... if (split_on == SPLITMV_ON) { // Update refpointer al miglior mv j = (TID % 4) + (TID / 8) * 8; // Genera 0 1 2 3 0 1 2 3 8 9 10 11 8 9 10 11 16 17... 
// perche' TID 0 e 4 vengono traslati dello stesso mv corrispondente // a quello ora presente in colonna 0 di minpos refptr += offset_16x12__[minpos[j]]; for (i = 0; i < 16; i++) { const uint8_t *refp = refptr + offset_16x12_refin__[i]; int32_t sad_result; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); if ( TID < 16 ) { for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID][j] += diff[TID][j+4]; } } minpos_refin[TID] = 0; __syncthreads(); for( i = 1; i < 16; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos_refin[TID] = i; } } // salva MV della split 4x4 if ( TID < 16 ) { MVs_split_g[MBoffset*24 + TID].as_mv.row += (MV_lookup_refin_split[ minpos_refin[TID + (TID / 4) * 4] ].row * 8); MVs_split_g[MBoffset*24 + TID].as_mv.col += (MV_lookup_refin_split[ minpos_refin[TID + (TID / 4) * 4] ].col * 8); } // 2. // Ricerca di un mv per ogni blocco 8x8 // Procedura esattamente identica alla precedente: TID che elaborano stesso blocco avranno // mv impostato coerentemente. Differente accumulazione (per blocco 0: TID 0 1 4 5 8 9 12 13) // Update refpointer al miglior mv j = (TID / 8) * 8 + 4; // Genera 4 4 4 4 4 4 4 4 12 12 12 12 12 12 12 12 20 20 20 20... 
refptr = ref_frame + im_offset + offset_16x12__[ minpos[j] ]; for (i = 0; i < 16; i++) { const uint8_t *refp = refptr + offset_16x12_refin__[i]; int32_t sad_result; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); // Sono pigro, copio e incollo la stessa manfrina if ( TID < 16 ) { for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID][j] += diff[TID][j+4]; } } __syncthreads(); if ( TID < 16 ) { for (i = 0; i < 4; i++) { j = 2 * i + (i / 2) * 12; diff[TID][(8 * i) + 4] = diff[TID][j] + diff[TID][j + 1] + diff[TID][j + 8] + diff[TID][j + 9]; } } __syncthreads(); minpos_refin[TID] = 0; __syncthreads(); // 32 thread, ognuno ricerca il minimo lungo ogni colonna // anche se le colonne interessanti sono solo la 4, 12, 20 e 28 for( i = 1; i < 16; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos_refin[TID] = i; } } __syncthreads(); // Salva i MV della split 8x8 if ( TID < 4 ) { MVs_split_g[MBoffset*24 + 16 + TID].as_mv.row += (MV_lookup_refin_split[ minpos_refin[8 * TID + 4] ].row * 8); MVs_split_g[MBoffset*24 + 16 + TID].as_mv.col += (MV_lookup_refin_split[ minpos_refin[8 * TID + 4] ].col * 8); } } // 5. 
// Refining search su blocco 16x16 // Update RefPointer to the best motion vector refptr = ref_frame + im_offset + offset_16x12__[ minpos[31] ]; for (i = 0; i < 16; i++) { const uint8_t *refp = refptr + offset_16x12_refin__[i]; int32_t sad_result; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); for (i=0; i<16; i++) diff[TID][i] += diff[TID][i+16]; __syncthreads(); for (i=0; i<8; i++) diff[TID][i] += diff[TID][i+8]; __syncthreads(); for (i=0; i<4; i++) diff[TID][i] += diff[TID][i+4]; __syncthreads(); diff[TID][0] += (diff[TID][1] + diff[TID][2] + diff[TID][3]); __syncthreads(); minpos[TID] = TID; __syncthreads(); // qui posso lasciare il vecchio modo di trovare il minimo, funziona lo stesso if( TID < 8 ) if( diff[TID][0] > diff[TID+8][0] ) { diff[TID][0] = diff[TID+8][0]; minpos[TID] = minpos[TID+8]; } __syncthreads(); if( TID < 4 ) if( diff[TID][0] > diff[TID + 4][0] ) { diff[TID][0] = diff[TID + 4][0]; minpos[TID] = minpos[TID + 4]; } __syncthreads(); if( TID < 2 ) if( diff[TID][0] > diff[TID + 2][0] ) { diff[TID][0] = diff[TID + 2][0]; minpos[TID] = minpos[TID + 2]; } __syncthreads(); if( TID == 0 ) { if( diff[0][0] > diff[1][0] ) { diff[0][0] = diff[1][0]; minpos[0] = minpos[1]; } MVs_g[MBoffset].as_mv.row += (MV_lookup_refin_split[ minpos[0] ].row * 8); MVs_g[MBoffset].as_mv.col += (MV_lookup_refin_split[ minpos[0] ].col * 8); } } void me_kernel_launch_split( VP8_COMMON * const common, const uint8_t * const in_frame, const uint8_t * const ref_frame, int const streamID, int const split_on, int_mv * const MVs, int_mv * const MVs_split ) { #if CUDA_VERBOSE float elapsedTime; cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); 
CHECK(cudaEventRecord(start)); #endif me_cuda_split <<< common->GPU.gridDim, common->GPU.blockDim, 0, common->GPU.streams.frame[streamID] >>> (in_frame, ref_frame, streamID, common->GPU.streamSize, common->gpu_frame.stride, common->gpu_frame.width, common->gpu_frame.num_MB_width, split_on, MVs, MVs_split ); #if CUDA_VERBOSE CHECK(cudaEventRecord(stop)); CHECK(cudaEventSynchronize(stop)); CHECK(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("\n[GPU] ME elapsed time streams[%d]: %.4f ms\n",streamID,elapsedTime); CHECK(cudaEventDestroy(start)); CHECK(cudaEventDestroy(stop)); add_STATS((double)elapsedTime,0); #endif } void me_cuda_launch_interleaved_split( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) { //int MV_size_16 = 16*sizeof(int_mv); int MV_size_16 = cm->GPU.streamSize * sizeof(int_mv); // for printing informations about reference frame flags and thei usage, I left a commented prinft at line 3625 // at the beginning of encode_frame_to_data_rate(..) in onyx_if.c for (int t = 0; t < cm->GPU.num_mb16th; t++) { int s = cm->GPU.streamLaunchOrder[t]; //int offset = 16*s; int offset = cm->GPU.streamSize * s; // bugfix per immagini il cui n di mb non e' divisibile per 16 // prima venivano lanciati troppi processi e cudaMemcpyAsync andava a leggere oltre i limiti degli array if (offset + cm->GPU.streamSize > cm->gpu_frame.num_mv) MV_size_16 = ( offset + cm->GPU.streamSize - cm->gpu_frame.num_mv ) * sizeof( int_mv ); if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, SPLITMV_ON, (cm->gpu_frame.MVs_g)[0], cm->gpu_frame.MVs_split_g ); CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_split_h)[offset],&(cm->gpu_frame.MVs_split_g)[offset],24 * 
MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } // Se ref_frame_flags indica la presenza di un gold e se il flag del fb puntato da gld_fb_idx indica che e' gold, allora... if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, SPLITMV_OFF, (cm->gpu_frame.MVs_g)[1], 0 ); CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } // Se ref_frame_flags indica la presenza di un altref e se il flag del fb puntato da alt_fb_idx indica che e' altref, allora... if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, SPLITMV_OFF, (cm->gpu_frame.MVs_g)[2], 0 ); CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } } } void me_cuda_launch_not_interleaved_split( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) { //int MV_size_16 = 16*sizeof(int_mv); int MV_size_16 = cm->GPU.streamSize * sizeof(int_mv); // for printing informations about reference frame flags and thei usage, I left a commented prinft at line 3625 // at the beginning of encode_frame_to_data_rate(..) 
in onyx_if.c for (int t = 0; t < cm->GPU.num_mb16th; t++) { int s = cm->GPU.streamLaunchOrder[t]; //int offset = 16*s; int offset = cm->GPU.streamSize * s; // bugfix per immagini il cui n di mb non e' divisibile per 16 // prima venivano lanciati troppi processi e cudaMemcpyAsync andava a leggere oltre i limiti degli array if (offset + cm->GPU.streamSize > cm->gpu_frame.num_mv) MV_size_16 = ( offset + cm->GPU.streamSize - cm->gpu_frame.num_mv ) * sizeof( int_mv ); if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, SPLITMV_ON, (cm->gpu_frame.MVs_g)[0], cm->gpu_frame.MVs_split_g ); // Se ref_frame_flags indica la presenza di un gold e se il flag del fb puntato da gld_fb_idx indica che e' gold, allora... if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, SPLITMV_OFF, (cm->gpu_frame.MVs_g)[1], 0 ); // Se ref_frame_flags indica la presenza di un altref e se il flag del fb puntato da alt_fb_idx indica che e' altref, allora... 
if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, SPLITMV_OFF, (cm->gpu_frame.MVs_g)[2], 0 ); if (ref_frame_flags & GPUFLAG_LAST_FRAME) { CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16,cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_split_h)[offset],&(cm->gpu_frame.MVs_split_g)[offset],24 * MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } } } #endif /* HAVE_CUDA_ENABLED_DEVICE */ #ifdef __cplusplus } #endif
34862da38aaafcbba512a9234427670a04582446.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "header.h" #include <iostream> #include <string> #include <fstream> #include<stdlib.h> #include <stdio.h> #include<time.h> #include<hip/device_functions.h> #include<cuda.h> #include<math.h> using namespace std; //currently using LIF for spike learning __global__ void check_weight (Neuron *NeuronList, int network_size){ int blockId = blockIdx.x + blockIdx.y * gridDim.x; int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; if(index>network_size){ return; } //printf("|"); int i = 0; while(NeuronList[index].connected_in[i] > 0.1){ if(NeuronList[index].connected_weight[i]>1.0){ printf("connection%d---->%d_has_changed_weight:%f\n",i,index,NeuronList[index].connected_weight[i]); } i++; } } __global__ void check_total_spike (float *log_total_spike, int network_size){ int blockId = blockIdx.x + blockIdx.y * gridDim.x; int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; if(index>network_size){ return; } //printf("|"); printf("spikeNo_ofNeuronNo%d_is_%f\n",index,log_total_spike[index]); } void print_connected_in_weight_old(Neuron *NeuronList, int output_index_start, int output_index_stop, int plot){ float weight_max = 0; float weight_min = 10000; for(int i=0;i<(output_index_stop-output_index_start);i++){ for (int y=0; y<28; ++y) { for (int x=0; x<28; ++x) { //std::cout << ((image[y*28+x] == 0.0)? 
' ' : '*'); //std::cout << std::to_string(int(100*(NeuronList[output_index_start+i].connected_weight[y*28+x]))) << ' '; if(NeuronList[output_index_start+i].connected_weight[y*28+x]>weight_max){ weight_max = NeuronList[output_index_start+i].connected_weight[y*28+x]; } if(NeuronList[output_index_start+i].connected_weight[y*28+x]<weight_min){ weight_min = NeuronList[output_index_start+i].connected_weight[y*28+x]; } } //std::cout << std::endl; } //cout<<"\n\n\n"; } //printf("weightMaxIs%f;weightMinIs%f\n", weight_max, weight_min); if(plot){ float weight_diff = weight_max - weight_min; cimg_library::CImg<unsigned char> image("color_small.jpg"); /* cimg_library::CImgDisplay display(image, "Click a point"); while (!display.is_closed()) { display.wait(); if (display.button() && display.mouse_y() >= 0 && display.mouse_x() >= 0) { const int y = display.mouse_y(); const int x = display.mouse_x(); unsigned char randomColor[3]; randomColor[0] = rand() % 256; randomColor[1] = rand() % 256; randomColor[2] = rand() % 256; image.draw_point(x, y, randomColor); } image.display(display); } */ for(int i=0;i<(output_index_stop-output_index_start);i++){ int current_index = i+output_index_start; int img_i; int img_j; int img_k; for (img_i=0;img_i<input_image_w;img_i++){ for (img_j=0;img_j<input_image_l;img_j++){ for(img_k=0;img_k<3;img_k++){ float weight_raw = NeuronList[current_index].connected_weight[img_i*input_image_w+img_j]; image(img_j, img_i, 0, img_k) = 255*(weight_raw-weight_min)/weight_diff; //printf("pixel%d, %d, signal is: %f \n",img_i, img_j, img_temp); } } } /* cimg_library::CImgDisplay main_disp(image,"Synapse_Conductance"); while (!main_disp.is_closed()) { main_disp.wait(); } */ string out_file_name = "weight_out_index_"+to_string(current_index)+".jpg"; image.save(out_file_name.c_str()); } } } void print_connected_in_weight(Neuron *NeuronList, int output_index_start, int output_index_stop, int plot, string plot_prefix){ float weight_max = 0; float weight_min = 10000; 
for(int i=0;i<(output_index_stop-output_index_start);i++){ for (int y=0; y<input_image_w; ++y) { for (int x=0; x<input_image_l; ++x) { //std::cout << ((image[y*28+x] == 0.0)? ' ' : '*'); //std::cout << std::to_string(int(100*(NeuronList[output_index_start+i].connected_weight[y*28+x]))) << ' '; if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]>weight_max){ weight_max = NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]; } if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]<weight_min){ weight_min = NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]; } } //std::cout << std::endl; } //cout<<"\n\n\n"; } //printf("weightMaxIs%f;weightMinIs%f\n", weight_max, weight_min); if(plot){ float weight_diff = weight_max - weight_min; cimg_library::CImg<unsigned char> image("color.jpg"); image.resize(input_image_w, input_image_l); /* cimg_library::CImgDisplay display(image, "Click a point"); while (!display.is_closed()) { display.wait(); if (display.button() && display.mouse_y() >= 0 && display.mouse_x() >= 0) { const int y = display.mouse_y(); const int x = display.mouse_x(); unsigned char randomColor[3]; randomColor[0] = rand() % 256; randomColor[1] = rand() % 256; randomColor[2] = rand() % 256; image.draw_point(x, y, randomColor); } image.display(display); } */ for(int i=0;i<(output_index_stop-output_index_start);i++){ int current_index = i+output_index_start; int img_i; int img_j; int img_k; weight_max = 0; weight_min = 10000; for (int y=0; y<input_image_w; ++y) { for (int x=0; x<input_image_l; ++x) { //std::cout << ((image[y*28+x] == 0.0)? 
' ' : '*'); //std::cout << std::to_string(int(100*(NeuronList[output_index_start+i].connected_weight[y*28+x]))) << ' '; if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]>1){ NeuronList[output_index_start+i].connected_weight[y*input_image_l+x] = 1; } if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]<-1){ NeuronList[output_index_start+i].connected_weight[y*input_image_l+x] = -1; } } //std::cout << std::endl; } for (int y=0; y<input_image_w; ++y) { for (int x=0; x<input_image_l; ++x) { //std::cout << ((image[y*28+x] == 0.0)? ' ' : '*'); //std::cout << std::to_string(int(100*(NeuronList[output_index_start+i].connected_weight[y*28+x]))) << ' '; if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]>weight_max){ weight_max = NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]; } if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]<weight_min){ weight_min = NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]; } } //std::cout << std::endl; } weight_diff = weight_max - weight_min; int pix_count = 0; bool plot_three_channel = true; if(input_image_channel==1) plot_three_channel = false; if(input_image_channel==1) bool plot_three_channel = false; for(img_k=0;img_k<3;img_k++){ for (img_i=0;img_i<input_image_w;img_i++){ for (img_j=0;img_j<input_image_l;img_j++){ pix_count = img_k*input_image_w*input_image_l + img_i*input_image_l + img_j; float weight_raw = NeuronList[current_index].connected_weight[img_i*input_image_l+img_j]; if(plot_three_channel) weight_raw = NeuronList[current_index].connected_weight[pix_count]; image(img_j, img_i, 0, img_k) = 255*(weight_raw-weight_min)/weight_diff; //pix_count ++; //printf("pixel%d, %d, signal is: %f \n",img_i, img_j, img_temp); } } } /* cimg_library::CImgDisplay main_disp(image,"Synapse_Conductance"); while (!main_disp.is_closed()) { main_disp.wait(); } */ string out_file_name = plot_prefix + 
"weight_out_index_"+to_string(current_index)+".jpg"; image.save(out_file_name.c_str()); } } } void print_signal(Neuron *NeuronList, int start, int end){ for (int y=0; y<input_image_l; ++y) { for (int x=0; x<input_image_w; ++x) { //std::cout << ((image[y*28+x] == 0.0)? ' ' : '*'); int index = start + x + y*28; if(index>end) return; std::cout << std::to_string(int(NeuronList[index].state[1])) << ' '; } std::cout << std::endl; } } void data_check(Neuron *NeuronList, float *log_total_spike, int network_size, int mnist_start_index, int mnist_end_index, int function_select, string plot_prefix){ int SIZE_PER_SIDE = sqrt(network_size)+1; dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock ); dim3 dimGrid( (SIZE_PER_SIDE/dimBlock.x+1), (SIZE_PER_SIDE/dimBlock.y+1)); int plot = 1; int start_index = 0; int end_index = start_index+30; if(function_select==0){ hipLaunchKernelGGL(( check_weight), dim3(dimGrid), dim3(dimBlock), 0, 0, NeuronList, network_size); } if(function_select==1){ hipLaunchKernelGGL(( check_total_spike), dim3(dimGrid), dim3(dimBlock), 0, 0, log_total_spike, network_size); } if(function_select==2){ if(plot>0){ cout<<"Saving conductance visualization"<<endl; print_connected_in_weight(NeuronList, start_index, end_index, 1, plot_prefix); }else{ print_connected_in_weight(NeuronList, start_index, end_index, 0, plot_prefix); } } if(function_select==3){ print_signal(NeuronList, mnist_start_index, mnist_end_index); } }
34862da38aaafcbba512a9234427670a04582446.cu
#include "header.h" #include <iostream> #include <string> #include <fstream> #include<stdlib.h> #include <stdio.h> #include<time.h> #include<device_functions.h> #include<cuda.h> #include<math.h> using namespace std; //currently using LIF for spike learning __global__ void check_weight (Neuron *NeuronList, int network_size){ int blockId = blockIdx.x + blockIdx.y * gridDim.x; int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; if(index>network_size){ return; } //printf("|"); int i = 0; while(NeuronList[index].connected_in[i] > 0.1){ if(NeuronList[index].connected_weight[i]>1.0){ printf("connection%d---->%d_has_changed_weight:%f\n",i,index,NeuronList[index].connected_weight[i]); } i++; } } __global__ void check_total_spike (float *log_total_spike, int network_size){ int blockId = blockIdx.x + blockIdx.y * gridDim.x; int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; if(index>network_size){ return; } //printf("|"); printf("spikeNo_ofNeuronNo%d_is_%f\n",index,log_total_spike[index]); } void print_connected_in_weight_old(Neuron *NeuronList, int output_index_start, int output_index_stop, int plot){ float weight_max = 0; float weight_min = 10000; for(int i=0;i<(output_index_stop-output_index_start);i++){ for (int y=0; y<28; ++y) { for (int x=0; x<28; ++x) { //std::cout << ((image[y*28+x] == 0.0)? 
' ' : '*'); //std::cout << std::to_string(int(100*(NeuronList[output_index_start+i].connected_weight[y*28+x]))) << ' '; if(NeuronList[output_index_start+i].connected_weight[y*28+x]>weight_max){ weight_max = NeuronList[output_index_start+i].connected_weight[y*28+x]; } if(NeuronList[output_index_start+i].connected_weight[y*28+x]<weight_min){ weight_min = NeuronList[output_index_start+i].connected_weight[y*28+x]; } } //std::cout << std::endl; } //cout<<"\n\n\n"; } //printf("weightMaxIs%f;weightMinIs%f\n", weight_max, weight_min); if(plot){ float weight_diff = weight_max - weight_min; cimg_library::CImg<unsigned char> image("color_small.jpg"); /* cimg_library::CImgDisplay display(image, "Click a point"); while (!display.is_closed()) { display.wait(); if (display.button() && display.mouse_y() >= 0 && display.mouse_x() >= 0) { const int y = display.mouse_y(); const int x = display.mouse_x(); unsigned char randomColor[3]; randomColor[0] = rand() % 256; randomColor[1] = rand() % 256; randomColor[2] = rand() % 256; image.draw_point(x, y, randomColor); } image.display(display); } */ for(int i=0;i<(output_index_stop-output_index_start);i++){ int current_index = i+output_index_start; int img_i; int img_j; int img_k; for (img_i=0;img_i<input_image_w;img_i++){ for (img_j=0;img_j<input_image_l;img_j++){ for(img_k=0;img_k<3;img_k++){ float weight_raw = NeuronList[current_index].connected_weight[img_i*input_image_w+img_j]; image(img_j, img_i, 0, img_k) = 255*(weight_raw-weight_min)/weight_diff; //printf("pixel%d, %d, signal is: %f \n",img_i, img_j, img_temp); } } } /* cimg_library::CImgDisplay main_disp(image,"Synapse_Conductance"); while (!main_disp.is_closed()) { main_disp.wait(); } */ string out_file_name = "weight_out_index_"+to_string(current_index)+".jpg"; image.save(out_file_name.c_str()); } } } void print_connected_in_weight(Neuron *NeuronList, int output_index_start, int output_index_stop, int plot, string plot_prefix){ float weight_max = 0; float weight_min = 10000; 
for(int i=0;i<(output_index_stop-output_index_start);i++){ for (int y=0; y<input_image_w; ++y) { for (int x=0; x<input_image_l; ++x) { //std::cout << ((image[y*28+x] == 0.0)? ' ' : '*'); //std::cout << std::to_string(int(100*(NeuronList[output_index_start+i].connected_weight[y*28+x]))) << ' '; if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]>weight_max){ weight_max = NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]; } if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]<weight_min){ weight_min = NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]; } } //std::cout << std::endl; } //cout<<"\n\n\n"; } //printf("weightMaxIs%f;weightMinIs%f\n", weight_max, weight_min); if(plot){ float weight_diff = weight_max - weight_min; cimg_library::CImg<unsigned char> image("color.jpg"); image.resize(input_image_w, input_image_l); /* cimg_library::CImgDisplay display(image, "Click a point"); while (!display.is_closed()) { display.wait(); if (display.button() && display.mouse_y() >= 0 && display.mouse_x() >= 0) { const int y = display.mouse_y(); const int x = display.mouse_x(); unsigned char randomColor[3]; randomColor[0] = rand() % 256; randomColor[1] = rand() % 256; randomColor[2] = rand() % 256; image.draw_point(x, y, randomColor); } image.display(display); } */ for(int i=0;i<(output_index_stop-output_index_start);i++){ int current_index = i+output_index_start; int img_i; int img_j; int img_k; weight_max = 0; weight_min = 10000; for (int y=0; y<input_image_w; ++y) { for (int x=0; x<input_image_l; ++x) { //std::cout << ((image[y*28+x] == 0.0)? 
' ' : '*'); //std::cout << std::to_string(int(100*(NeuronList[output_index_start+i].connected_weight[y*28+x]))) << ' '; if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]>1){ NeuronList[output_index_start+i].connected_weight[y*input_image_l+x] = 1; } if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]<-1){ NeuronList[output_index_start+i].connected_weight[y*input_image_l+x] = -1; } } //std::cout << std::endl; } for (int y=0; y<input_image_w; ++y) { for (int x=0; x<input_image_l; ++x) { //std::cout << ((image[y*28+x] == 0.0)? ' ' : '*'); //std::cout << std::to_string(int(100*(NeuronList[output_index_start+i].connected_weight[y*28+x]))) << ' '; if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]>weight_max){ weight_max = NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]; } if(NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]<weight_min){ weight_min = NeuronList[output_index_start+i].connected_weight[y*input_image_l+x]; } } //std::cout << std::endl; } weight_diff = weight_max - weight_min; int pix_count = 0; bool plot_three_channel = true; if(input_image_channel==1) plot_three_channel = false; if(input_image_channel==1) bool plot_three_channel = false; for(img_k=0;img_k<3;img_k++){ for (img_i=0;img_i<input_image_w;img_i++){ for (img_j=0;img_j<input_image_l;img_j++){ pix_count = img_k*input_image_w*input_image_l + img_i*input_image_l + img_j; float weight_raw = NeuronList[current_index].connected_weight[img_i*input_image_l+img_j]; if(plot_three_channel) weight_raw = NeuronList[current_index].connected_weight[pix_count]; image(img_j, img_i, 0, img_k) = 255*(weight_raw-weight_min)/weight_diff; //pix_count ++; //printf("pixel%d, %d, signal is: %f \n",img_i, img_j, img_temp); } } } /* cimg_library::CImgDisplay main_disp(image,"Synapse_Conductance"); while (!main_disp.is_closed()) { main_disp.wait(); } */ string out_file_name = plot_prefix + 
"weight_out_index_"+to_string(current_index)+".jpg"; image.save(out_file_name.c_str()); } } } void print_signal(Neuron *NeuronList, int start, int end){ for (int y=0; y<input_image_l; ++y) { for (int x=0; x<input_image_w; ++x) { //std::cout << ((image[y*28+x] == 0.0)? ' ' : '*'); int index = start + x + y*28; if(index>end) return; std::cout << std::to_string(int(NeuronList[index].state[1])) << ' '; } std::cout << std::endl; } } void data_check(Neuron *NeuronList, float *log_total_spike, int network_size, int mnist_start_index, int mnist_end_index, int function_select, string plot_prefix){ int SIZE_PER_SIDE = sqrt(network_size)+1; dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock ); dim3 dimGrid( (SIZE_PER_SIDE/dimBlock.x+1), (SIZE_PER_SIDE/dimBlock.y+1)); int plot = 1; int start_index = 0; int end_index = start_index+30; if(function_select==0){ check_weight<<<dimGrid, dimBlock>>>(NeuronList, network_size); } if(function_select==1){ check_total_spike<<<dimGrid, dimBlock>>>(log_total_spike, network_size); } if(function_select==2){ if(plot>0){ cout<<"Saving conductance visualization"<<endl; print_connected_in_weight(NeuronList, start_index, end_index, 1, plot_prefix); }else{ print_connected_in_weight(NeuronList, start_index, end_index, 0, plot_prefix); } } if(function_select==3){ print_signal(NeuronList, mnist_start_index, mnist_end_index); } }
3997ba23209a59a1e3462641926660794285e1a6.hip
// !!! This is a file automatically generated by hipify!!! ////////////////////////////////////////////////////////////////////////// ////This is the code implementation for GPU Premier League Round 1 ////////////////////////////////////////////////////////////////////////// #include <iostream> #include <fstream> #include <vector> #include <chrono> #include <hip/hip_runtime.h> using namespace std; ////////////////////////////////////////////////////////////////////////// ////TODO 0: Please replace the following strings with your team name and author names ////Note: Please do not use space in the string, use "_" instead ////////////////////////////////////////////////////////////////////////// namespace name { std::string team="using_namespace_std;"; std::string author_1="Jeff Liu"; }; ////This is a matrix class to carry out linear algebra operations on both GPU and CPU ////It is the same as the sample code I showed in class on Week 3. ////NOTICE: You do not have to change the implementation in this class. ////But if you do want to change part of it for performance reasons, please let us known by writting a submission note on Canvas. 
class Matrix{ public: int m=0; ////number of rows int n=0; ////number of columns vector<float> elements_on_host; ////we use a std::vector for the element array on host float* elements_on_dev=0; ////we use a pointer for the element array on device bool on_host=true; ////constructors __host__ Matrix(){} __host__ Matrix(const int _m,const int _n,bool _on_host=true) { on_host=_on_host; if(on_host)Resize_On_Host(_m,_n); else Resize_On_Device(_m,_n); } ////destructor __host__ ~Matrix() { if(!on_host&&elements_on_dev!=0) hipFree(elements_on_dev); } ////Resize on host or device __host__ void Resize_On_Host(const int _m,const int _n) { if(m==_m&&n==_n)return; m=_m; n=_n; elements_on_host.resize(m*n); } __host__ void Resize_On_Device(const int _m,const int _n) { if(m==_m&&n==_n)return; m=_m; n=_n; if(elements_on_dev!=0)hipFree(elements_on_dev); hipMalloc((void**)&elements_on_dev,m*n*sizeof(float)); } ////random access a matrix element inline __host__ float& operator() (const int i,const int j) { return elements_on_host[i*n+j]; } inline __host__ const float& operator() (const int i,const int j) const { return elements_on_host[i*n+j]; } ////copy data with four cases (CPU->CPU, GPU->CPU, GPU->GPU, CPU->GPU) __host__ Matrix& operator= (const Matrix& mtx) { if(on_host&&mtx.on_host){ Resize_On_Host(mtx.m,mtx.n); elements_on_host=mtx.elements_on_host; } else if(on_host&&!mtx.on_host){ Resize_On_Host(mtx.m,mtx.n); hipMemcpy(&elements_on_host[0],mtx.elements_on_dev,m*n*sizeof(float),hipMemcpyDeviceToHost); } else if(!on_host&&!mtx.on_host){ Resize_On_Device(mtx.m,mtx.n); hipMemcpy(elements_on_dev,mtx.elements_on_dev,mtx.m*n*sizeof(float),hipMemcpyDeviceToDevice); } else if(!on_host&&mtx.on_host){ Resize_On_Device(mtx.m,mtx.n); hipMemcpy(elements_on_dev,&mtx.elements_on_host[0],m*n*sizeof(float),hipMemcpyHostToDevice); } return *this; } ////print matrix elements on screen __host__ friend ostream & operator << (ostream &out,const Matrix &mtx) { if(!mtx.on_host) cout<<"Print for matrix 
on device is not supported."<<endl; for(int i=0;i<mtx.m;i++){ for(int j=0;j<mtx.n;j++){ out<<mtx(i,j)<<", "; } out<<std::endl; } return out; } }; ////////////////////////////////////////////////////////////////////////// ////Your tasks start! ////This is a sample implementation without using any memory hierarchy ////The function calculates C=A*B, with dimA=[Am,An], dimB=[Bm,Bn], dimC=[Am,bn], and An=Bm __global__ void Matrix_Multiplication_AB_Kernel_Poorman(const float* Ae,const float* Be,float* Ce,const int Am,const int An,const int Bn) { int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; float val=0.f; for(int k=0;k<An;k++) val+=Ae[i*An+k]*Be[k*Bn+j]; Ce[i*Bn+j]=val; } ////////////////////////////////////////////////////////////////////////// ////Task 1: implement your fast matrix-matrix multiplication in the following kernel function. ////The function parameters are the same as the sample function: ////The function calculates C=A*B, with dimA=[Am,An], dimB=[Bm,Bn], dimC=[Am,bn], and An=Bm ////////////////////////////////////////////////////////////////////////// __global__ void Matrix_Multiplication_AB_Kernel_Your_Version(const float* Ae,const float* Be,float* Ce,const int Am,const int An,const int Bn) { // initialize memory const int block_size = 32; const int num_tiles = An / block_size; __shared__ float a_shared[block_size][block_size]; __shared__ float b_shared[block_size][block_size]; __shared__ float c_shared[block_size][block_size]; // calculate 1d index of correct item on A, B, C int thr_per_block = blockDim.y * blockDim.x; int c_idx = blockIdx.y * gridDim.x * thr_per_block + threadIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; c_shared[threadIdx.y][threadIdx.x] = 0; // set everything to zero just the first time int a_idx, b_idx; for (int tile = 0; tile < num_tiles; ++tile) { // want blockIdx.x to increment a_idx = blockIdx.y * num_tiles * thr_per_block + threadIdx.y * num_tiles * blockDim.x + tile 
* blockDim.x + threadIdx.x; // want blockIdx.y to increment b_idx = tile * gridDim.x * thr_per_block + threadIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; a_shared[threadIdx.y][threadIdx.x] = Ae[a_idx]; b_shared[threadIdx.y][threadIdx.x] = Be[b_idx]; __syncthreads(); // lmao loop unrolling time my dudes c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][0] * b_shared[0][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][1] * b_shared[1][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][2] * b_shared[2][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][3] * b_shared[3][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][4] * b_shared[4][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][5] * b_shared[5][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][6] * b_shared[6][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][7] * b_shared[7][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][8] * b_shared[8][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][9] * b_shared[9][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][10] * b_shared[10][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][11] * b_shared[11][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][12] * b_shared[12][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][13] * b_shared[13][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][14] * b_shared[14][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][15] * b_shared[15][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][16] * b_shared[16][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][17] * b_shared[17][threadIdx.x]; 
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][18] * b_shared[18][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][19] * b_shared[19][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][20] * b_shared[20][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][21] * b_shared[21][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][22] * b_shared[22][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][23] * b_shared[23][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][24] * b_shared[24][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][25] * b_shared[25][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][26] * b_shared[26][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][27] * b_shared[27][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][28] * b_shared[28][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][29] * b_shared[29][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][30] * b_shared[30][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][31] * b_shared[31][threadIdx.x]; __syncthreads(); } // save to global Ce[c_idx] = c_shared[threadIdx.y][threadIdx.x]; } ////This is a sample implementation without using any memory hierarchy ////The function calculates the matrix multiplication, with C=A^T*B*A, A^T is the transpose of A, dimA=[Am,An], dimB=[Am,Am], and dimC=[An,An] __global__ void Matrix_Multiplication_ATBA_Kernel_Poorman(const float* Ae,const float* Be,float* Ce,const int Am,const int An) { int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; float val=0.f; for(int l=0;l<Am;l++) for(int k=0;k<Am;k++) val+=Ae[l*An+i]*Be[l*Am+k]*Ae[k*An+j]; Ce[i*An+j]=val; } 
////////////////////////////////////////////////////////////////////////// ////Task 2: calculate the matrix multiplication in the following kernel function. ////The function parameters are the same as the sample function: ////The function calculates the matrix multiplication, with C=A^T*B*A, A^T is the transpose of A, dimA=[Am,An], dimB=[Am,Am], and dimC=[An,An] ////////////////////////////////////////////////////////////////////////// __global__ void Matrix_Multiplication_ATBA_Kernel_Your_Version(const float* Ae,const float* Be,float* Ce,const int Am,const int An) { // memory setup const int num_tiles = Am / 32; __shared__ float aTT_shared[32][32]; __shared__ float b_shared[32][32]; __shared__ float a_shared[32][32]; __shared__ float accum_shared[32][32]; __shared__ float c_shared[32][32]; // coordinate setup int thr_per_block = blockDim.y * blockDim.x; int c_idx = blockIdx.y*gridDim.x*thr_per_block + threadIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; int a_idx, b_idx, aTT_idx; // initialize memory c_shared[threadIdx.y][threadIdx.x] = 0; // following psuedocode coordinates are (y,x) for (int ay = 0; ay < num_tiles; ++ay) { //ay = bx // load a(ay,blockIdx.x) a_idx = ay*gridDim.x*thr_per_block + threadIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; a_shared[threadIdx.y][threadIdx.x] = Ae[a_idx]; // clear accumulator accum_shared[threadIdx.y][threadIdx.x] = 0; __syncthreads(); for (int by = 0; by < num_tiles; ++by) { // by = aTx = aTTy // calculate indices b_idx = by*num_tiles*thr_per_block + threadIdx.y*num_tiles*blockDim.x + ay*blockDim.x + threadIdx.x; aTT_idx = by*gridDim.x*thr_per_block + threadIdx.y*gridDim.x*blockDim.x + blockIdx.y*blockDim.x + threadIdx.x; // load aTT(by, blockIdx.y) (since we load A but column access) and b(by,ay) b_shared[threadIdx.y][threadIdx.x] = Be[b_idx]; aTT_shared[threadIdx.y][threadIdx.x] = Ae[aTT_idx]; __syncthreads(); // multiply aT x b, accumulate accum_shared[threadIdx.y][threadIdx.x] += 
aTT_shared[0][threadIdx.y] * b_shared[0][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[1][threadIdx.y] * b_shared[1][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[2][threadIdx.y] * b_shared[2][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[3][threadIdx.y] * b_shared[3][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[4][threadIdx.y] * b_shared[4][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[5][threadIdx.y] * b_shared[5][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[6][threadIdx.y] * b_shared[6][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[7][threadIdx.y] * b_shared[7][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[8][threadIdx.y] * b_shared[8][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[9][threadIdx.y] * b_shared[9][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[10][threadIdx.y] * b_shared[10][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[11][threadIdx.y] * b_shared[11][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[12][threadIdx.y] * b_shared[12][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[13][threadIdx.y] * b_shared[13][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[14][threadIdx.y] * b_shared[14][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[15][threadIdx.y] * b_shared[15][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[16][threadIdx.y] * b_shared[16][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[17][threadIdx.y] * b_shared[17][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[18][threadIdx.y] * b_shared[18][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[19][threadIdx.y] * b_shared[19][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[20][threadIdx.y] * 
b_shared[20][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[21][threadIdx.y] * b_shared[21][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[22][threadIdx.y] * b_shared[22][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[23][threadIdx.y] * b_shared[23][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[24][threadIdx.y] * b_shared[24][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[25][threadIdx.y] * b_shared[25][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[26][threadIdx.y] * b_shared[26][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[27][threadIdx.y] * b_shared[27][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[28][threadIdx.y] * b_shared[28][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[29][threadIdx.y] * b_shared[29][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[30][threadIdx.y] * b_shared[30][threadIdx.x]; accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[31][threadIdx.y] * b_shared[31][threadIdx.x]; __syncthreads(); } // multiply accum x a, add to c c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][0] * a_shared[0][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][1] * a_shared[1][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][2] * a_shared[2][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][3] * a_shared[3][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][4] * a_shared[4][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][5] * a_shared[5][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][6] * a_shared[6][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][7] * a_shared[7][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][8] * 
a_shared[8][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][9] * a_shared[9][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][10] * a_shared[10][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][11] * a_shared[11][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][12] * a_shared[12][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][13] * a_shared[13][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][14] * a_shared[14][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][15] * a_shared[15][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][16] * a_shared[16][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][17] * a_shared[17][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][18] * a_shared[18][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][19] * a_shared[19][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][20] * a_shared[20][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][21] * a_shared[21][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][22] * a_shared[22][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][23] * a_shared[23][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][24] * a_shared[24][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][25] * a_shared[25][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][26] * a_shared[26][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][27] * a_shared[27][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][28] * a_shared[28][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] 
+= accum_shared[threadIdx.y][29] * a_shared[29][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][30] * a_shared[30][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += accum_shared[threadIdx.y][31] * a_shared[31][threadIdx.x]; __syncthreads(); } // save c to global Ce[c_idx] = c_shared[threadIdx.y][threadIdx.x]; } ////////////////////////////////////////////////////////////////////////// ////Task 3: calculate the Frobenius norm of a matrix ////The definition of F-norm for a matrix is square root of (the sum of squares of all the matrix elements), i.e., F=sqrt(sum_(A_ij^2)) ////See the definition: https://mathworld.wolfram.com/FrobeniusNorm.html ////////////////////////////////////////////////////////////////////////// ////Please write your own kernel function here, and call it in the function Test_F_Norm_On_GPU to test its correctness and performance __global__ void F_Norm_On_GPU_Lazy(const float* Ae, float* sum) { // lazy man's method for reference __shared__ float a_shared[16][16]; int thr_per_block = blockDim.y * blockDim.x; int idx = blockIdx.y*gridDim.x*thr_per_block + threadIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; float element = Ae[idx]; a_shared[threadIdx.y][threadIdx.x] = element * element; atomicAdd(&sum[0], a_shared[threadIdx.y][threadIdx.x]); } __global__ void F_Norm_On_GPU(const float* Ae, float* Be, bool round1) { extern __shared__ float data[]; int idx = blockIdx.x*blockDim.x*2 + threadIdx.x; // use 2 registers float num1 = Ae[idx]; float num2 = Ae[idx + blockDim.x]; // offset by stride is better for alignment // only square first time if (round1) { num1 *= num1; num2 *= num2; } // add two elements into one shared index data[threadIdx.x] = num1 + num2; __syncthreads(); // from reduce4 in class notes for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) { if(threadIdx.x < s){ data[threadIdx.x]+=data[threadIdx.x+s]; } __syncthreads(); } if (threadIdx.x == 0) Be[blockIdx.x] = data[0]; } 
////Congratulations, your tasks are all finished! ////////////////////////////////////////////////////////////////////////// ////Here are the test functions for your three kernel implementations ofstream out; __host__ void Test_Matrix_Multiplication_AB_On_GPU(const Matrix& A,const Matrix& B,Matrix& C) { //// Load A and B to device memory Matrix A_on_dev(A.m,A.n,false); A_on_dev=A; Matrix B_on_dev(B.m,B.n,false); B_on_dev=B; //// Allocate C in device memory Matrix C_on_dev(A_on_dev.m,B_on_dev.n,false); hipEvent_t start,end; hipEventCreate(&start); hipEventCreate(&end); float gpu_time=0.0f; hipDeviceSynchronize(); hipEventRecord(start); //// Invoke kernel const int block_size=32; const int block_num_x=C.m/block_size; const int block_num_y=C.n/block_size; #ifdef POORMAN hipLaunchKernelGGL(( Matrix_Multiplication_AB_Kernel_Poorman), dim3(dim3(block_num_x,block_num_y)),dim3(dim3(block_size,block_size)), 0, 0, A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n,B_on_dev.n); #endif #ifndef POORMAN hipLaunchKernelGGL(( Matrix_Multiplication_AB_Kernel_Your_Version), dim3(dim3(block_num_y,block_num_x)),dim3(dim3(block_size,block_size)), 0, 0, A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n,B_on_dev.n); #endif hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&gpu_time,start,end); printf("\nGPU runtime for matrix multiplication AB: %.4f ms\n",gpu_time); hipEventDestroy(start); hipEventDestroy(end); //// Transfer data back to CPU C=C_on_dev; out<<"T1: "<<gpu_time<<endl; } __host__ void Test_Matrix_Multiplication_ATBA_On_GPU(const Matrix& A,const Matrix& B,Matrix& C) { //// Load A and B to device memory Matrix A_on_dev(A.m,A.n,false); A_on_dev=A; Matrix B_on_dev(B.m,B.n,false); B_on_dev=B; //// Allocate C in device memory Matrix C_on_dev(A_on_dev.n,A_on_dev.n,false); hipEvent_t start,end; hipEventCreate(&start); hipEventCreate(&end); float gpu_time=0.0f; 
hipDeviceSynchronize(); hipEventRecord(start); //// Invoke kernel const int block_size=32; const int block_num_x=C.m/block_size; const int block_num_y=C.n/block_size; #ifdef POORMAN hipLaunchKernelGGL(( Matrix_Multiplication_ATBA_Kernel_Poorman), dim3(dim3(block_num_x,block_num_y)),dim3(dim3(block_size,block_size)), 0, 0, A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n); #endif #ifndef POORMAN ////NOTICE: You do not have to use the block_size I specified here. You may customize the size of your grid and blocks for better performance. hipLaunchKernelGGL(( Matrix_Multiplication_ATBA_Kernel_Your_Version), dim3(dim3(block_num_y,block_num_x)),dim3(dim3(block_size,block_size)), 0, 0, A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n); #endif hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&gpu_time,start,end); printf("\nGPU runtime for matrix multiplication ATBA: %.4f ms\n",gpu_time); hipEventDestroy(start); hipEventDestroy(end); //// Transfer data back to CPU C=C_on_dev; out<<"T2: "<<gpu_time<<endl; } __host__ void Test_Matrix_F_Norm_On_GPU(const Matrix& A, float& norm) { //// Load A and B to device memory Matrix A_on_dev(A.m,A.n,false); A_on_dev=A; hipEvent_t start,end; hipEventCreate(&start); hipEventCreate(&end); float gpu_time=0.0f; hipDeviceSynchronize(); hipEventRecord(start); #ifdef POORMAN // atomic add //// Invoke kernel const int block_size=16; const int block_num_x=A.n/block_size; const int block_num_y=A.m/block_size; float *sum_dev = nullptr; hipMalloc((void**)&sum_dev, sizeof(float)); hipLaunchKernelGGL(( F_Norm_On_GPU_Lazy), dim3(dim3(block_num_x,block_num_y)), dim3(dim3(block_size,block_size)), 0, 0, A_on_dev.elements_on_dev, sum_dev); float *sum_host = (float *)malloc(4); hipMemcpy(sum_host, sum_dev, sizeof(float), hipMemcpyDeviceToHost); hipFree(sum_dev); norm = sqrt(*sum_host); free(sum_host); #endif // ifdef #ifndef POORMAN // parallel 
reduction const int r1_blocks = A.m; const int r1_threads = A.n / 2; const int r2_threads = A.m / 2; float *B_dev = nullptr; hipMalloc((void**)&B_dev, A.m * sizeof(float)); hipLaunchKernelGGL(( F_Norm_On_GPU), dim3(r1_blocks), dim3(r1_threads), r1_threads*sizeof(float), 0, A_on_dev.elements_on_dev, B_dev, true); hipLaunchKernelGGL(( F_Norm_On_GPU), dim3(1), dim3(r2_threads), r2_threads*sizeof(float), 0, B_dev, B_dev, false); float result = 0; hipMemcpy(&result,B_dev,sizeof(float),hipMemcpyDeviceToHost); norm = sqrt(result); hipFree(B_dev); #endif // ifndef hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&gpu_time,start,end); printf("\nGPU runtime for F norm: %.4f ms\n",gpu_time); hipEventDestroy(start); hipEventDestroy(end); out<<"T3: "<<gpu_time<<endl; } int main() { if(name::team=="Team_X"){ printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n"); return 0; } std::string file_name=name::team+"_competition_1_matrix.dat"; out.open(file_name.c_str()); if(out.fail()){ printf("\ncannot open file %s to record results\n",file_name.c_str()); return 0; } ////////////////////////////////////////////////////////////////////////// ////NOTICE: We may use a different set of parameters to evaluate your code. ////So please test your functions with different size and initial values. 
////////////////////////////////////////////////////////////////////////// const int m=512; const int n=2048; const int p=1024; Matrix h_A(m,n); for(int i=0;i<m;i++){ for(int j=0;j<n;j++){ h_A(i,j) = 1; } } Matrix h_B(n,p); for(int i=0;i<n;i++){ for(int j=0;j<p;j++){ h_B(i,j) = 1; } } Matrix h_C(m,p); Matrix h_B2(m,m); for(int i=0;i<m;i++){ for(int j=0;j<m;j++){ h_B2(i,j) = 1; } } Matrix h_C2(n,n); Test_Matrix_Multiplication_AB_On_GPU(h_A,h_B,h_C); cout<<"AB result: "<<h_C(h_C.m/2,h_C.n/2)<<endl; out<<"R1: "<<h_C(h_C.m/2,h_C.n/2)<<endl; Test_Matrix_Multiplication_ATBA_On_GPU(h_A,h_B2,h_C2); cout<<"ATBA result: "<<h_C2(h_C2.m/3,h_C2.n/3)<<endl; out<<"R2: "<<h_C2(h_C2.m/3,h_C2.n/3)<<endl; float f_norm=0.f; Test_Matrix_F_Norm_On_GPU(h_A,f_norm); cout<<"F-norm result: "<<f_norm<<endl; out<<"R3: "<<f_norm<<endl; return 0; }
3997ba23209a59a1e3462641926660794285e1a6.cu
////////////////////////////////////////////////////////////////////////// ////This is the code implementation for GPU Premier League Round 1 ////////////////////////////////////////////////////////////////////////// #include <iostream> #include <fstream> #include <vector> #include <chrono> #include <cuda_runtime.h> using namespace std; ////////////////////////////////////////////////////////////////////////// ////TODO 0: Please replace the following strings with your team name and author names ////Note: Please do not use space in the string, use "_" instead ////////////////////////////////////////////////////////////////////////// namespace name { std::string team="using_namespace_std;"; std::string author_1="Jeff Liu"; }; ////This is a matrix class to carry out linear algebra operations on both GPU and CPU ////It is the same as the sample code I showed in class on Week 3. ////NOTICE: You do not have to change the implementation in this class. ////But if you do want to change part of it for performance reasons, please let us known by writting a submission note on Canvas. 
class Matrix{ public: int m=0; ////number of rows int n=0; ////number of columns vector<float> elements_on_host; ////we use a std::vector for the element array on host float* elements_on_dev=0; ////we use a pointer for the element array on device bool on_host=true; ////constructors __host__ Matrix(){} __host__ Matrix(const int _m,const int _n,bool _on_host=true) { on_host=_on_host; if(on_host)Resize_On_Host(_m,_n); else Resize_On_Device(_m,_n); } ////destructor __host__ ~Matrix() { if(!on_host&&elements_on_dev!=0) cudaFree(elements_on_dev); } ////Resize on host or device __host__ void Resize_On_Host(const int _m,const int _n) { if(m==_m&&n==_n)return; m=_m; n=_n; elements_on_host.resize(m*n); } __host__ void Resize_On_Device(const int _m,const int _n) { if(m==_m&&n==_n)return; m=_m; n=_n; if(elements_on_dev!=0)cudaFree(elements_on_dev); cudaMalloc((void**)&elements_on_dev,m*n*sizeof(float)); } ////random access a matrix element inline __host__ float& operator() (const int i,const int j) { return elements_on_host[i*n+j]; } inline __host__ const float& operator() (const int i,const int j) const { return elements_on_host[i*n+j]; } ////copy data with four cases (CPU->CPU, GPU->CPU, GPU->GPU, CPU->GPU) __host__ Matrix& operator= (const Matrix& mtx) { if(on_host&&mtx.on_host){ Resize_On_Host(mtx.m,mtx.n); elements_on_host=mtx.elements_on_host; } else if(on_host&&!mtx.on_host){ Resize_On_Host(mtx.m,mtx.n); cudaMemcpy(&elements_on_host[0],mtx.elements_on_dev,m*n*sizeof(float),cudaMemcpyDeviceToHost); } else if(!on_host&&!mtx.on_host){ Resize_On_Device(mtx.m,mtx.n); cudaMemcpy(elements_on_dev,mtx.elements_on_dev,mtx.m*n*sizeof(float),cudaMemcpyDeviceToDevice); } else if(!on_host&&mtx.on_host){ Resize_On_Device(mtx.m,mtx.n); cudaMemcpy(elements_on_dev,&mtx.elements_on_host[0],m*n*sizeof(float),cudaMemcpyHostToDevice); } return *this; } ////print matrix elements on screen __host__ friend ostream & operator << (ostream &out,const Matrix &mtx) { if(!mtx.on_host) cout<<"Print 
for matrix on device is not supported."<<endl; for(int i=0;i<mtx.m;i++){ for(int j=0;j<mtx.n;j++){ out<<mtx(i,j)<<", "; } out<<std::endl; } return out; } }; ////////////////////////////////////////////////////////////////////////// ////Your tasks start! ////This is a sample implementation without using any memory hierarchy ////The function calculates C=A*B, with dimA=[Am,An], dimB=[Bm,Bn], dimC=[Am,bn], and An=Bm __global__ void Matrix_Multiplication_AB_Kernel_Poorman(const float* Ae,const float* Be,float* Ce,const int Am,const int An,const int Bn) { int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; float val=0.f; for(int k=0;k<An;k++) val+=Ae[i*An+k]*Be[k*Bn+j]; Ce[i*Bn+j]=val; } ////////////////////////////////////////////////////////////////////////// ////Task 1: implement your fast matrix-matrix multiplication in the following kernel function. ////The function parameters are the same as the sample function: ////The function calculates C=A*B, with dimA=[Am,An], dimB=[Bm,Bn], dimC=[Am,bn], and An=Bm ////////////////////////////////////////////////////////////////////////// __global__ void Matrix_Multiplication_AB_Kernel_Your_Version(const float* Ae,const float* Be,float* Ce,const int Am,const int An,const int Bn) { // initialize memory const int block_size = 32; const int num_tiles = An / block_size; __shared__ float a_shared[block_size][block_size]; __shared__ float b_shared[block_size][block_size]; __shared__ float c_shared[block_size][block_size]; // calculate 1d index of correct item on A, B, C int thr_per_block = blockDim.y * blockDim.x; int c_idx = blockIdx.y * gridDim.x * thr_per_block + threadIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; c_shared[threadIdx.y][threadIdx.x] = 0; // set everything to zero just the first time int a_idx, b_idx; for (int tile = 0; tile < num_tiles; ++tile) { // want blockIdx.x to increment a_idx = blockIdx.y * num_tiles * thr_per_block + threadIdx.y * num_tiles * 
blockDim.x + tile * blockDim.x + threadIdx.x; // want blockIdx.y to increment b_idx = tile * gridDim.x * thr_per_block + threadIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; a_shared[threadIdx.y][threadIdx.x] = Ae[a_idx]; b_shared[threadIdx.y][threadIdx.x] = Be[b_idx]; __syncthreads(); // lmao loop unrolling time my dudes c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][0] * b_shared[0][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][1] * b_shared[1][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][2] * b_shared[2][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][3] * b_shared[3][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][4] * b_shared[4][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][5] * b_shared[5][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][6] * b_shared[6][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][7] * b_shared[7][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][8] * b_shared[8][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][9] * b_shared[9][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][10] * b_shared[10][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][11] * b_shared[11][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][12] * b_shared[12][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][13] * b_shared[13][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][14] * b_shared[14][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][15] * b_shared[15][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][16] * b_shared[16][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][17] * 
b_shared[17][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][18] * b_shared[18][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][19] * b_shared[19][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][20] * b_shared[20][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][21] * b_shared[21][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][22] * b_shared[22][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][23] * b_shared[23][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][24] * b_shared[24][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][25] * b_shared[25][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][26] * b_shared[26][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][27] * b_shared[27][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][28] * b_shared[28][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][29] * b_shared[29][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][30] * b_shared[30][threadIdx.x]; c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][31] * b_shared[31][threadIdx.x]; __syncthreads(); } // save to global Ce[c_idx] = c_shared[threadIdx.y][threadIdx.x]; } ////This is a sample implementation without using any memory hierarchy ////The function calculates the matrix multiplication, with C=A^T*B*A, A^T is the transpose of A, dimA=[Am,An], dimB=[Am,Am], and dimC=[An,An] __global__ void Matrix_Multiplication_ATBA_Kernel_Poorman(const float* Ae,const float* Be,float* Ce,const int Am,const int An) { int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; float val=0.f; for(int l=0;l<Am;l++) for(int k=0;k<Am;k++) val+=Ae[l*An+i]*Be[l*Am+k]*Ae[k*An+j]; Ce[i*An+j]=val; } 
//////////////////////////////////////////////////////////////////////////
////Task 2: calculate the matrix multiplication in the following kernel function.
////The function parameters are the same as the sample function:
////The function calculates the matrix multiplication, with C=A^T*B*A, A^T is the transpose of A, dimA=[Am,An], dimB=[Am,Am], and dimC=[An,An]
//////////////////////////////////////////////////////////////////////////
// Tiled evaluation of C = A^T * B * A using 32x32 shared-memory tiles.
// Each block produces one 32x32 tile of C; assumes blockDim == (32,32)
// and that Am is a multiple of 32 (num_tiles below divides exactly).
__global__ void Matrix_Multiplication_ATBA_Kernel_Your_Version(const float* Ae,const float* Be,float* Ce,const int Am,const int An)
{
	// Number of 32-wide tiles along the Am dimension.
	const int tiles = Am / 32;

	// Shared staging: a tile of A read column-wise (acts as A^T), a tile of B,
	// a tile of A, the running (A^T*B) partial, and the output tile of C.
	__shared__ float at_tile[32][32];
	__shared__ float b_tile[32][32];
	__shared__ float a_tile[32][32];
	__shared__ float atb_tile[32][32];
	__shared__ float c_tile[32][32];

	const int tx = threadIdx.x;
	const int ty = threadIdx.y;
	const int per_block = blockDim.y * blockDim.x;

	// Flat global index of this thread's output element in Ce.
	const int c_idx = blockIdx.y * gridDim.x * per_block + ty * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + tx;

	c_tile[ty][tx] = 0;

	// Outer loop over tile row "ay" of A (coordinates below are (y,x)).
	for (int ay = 0; ay < tiles; ++ay)
	{
		// Load a(ay, blockIdx.x) and clear the A^T*B accumulator tile.
		const int a_idx = ay * gridDim.x * per_block + ty * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + tx;
		a_tile[ty][tx] = Ae[a_idx];
		atb_tile[ty][tx] = 0;
		__syncthreads();

		// Inner loop: accumulate the (blockIdx.y, ay) tile of A^T * B.
		for (int by = 0; by < tiles; ++by)
		{
			// b(by, ay) and a(by, blockIdx.y); the latter is read transposed below.
			const int b_idx  = by * tiles * per_block + ty * tiles * blockDim.x + ay * blockDim.x + tx;
			const int at_idx = by * gridDim.x * per_block + ty * gridDim.x * blockDim.x + blockIdx.y * blockDim.x + tx;
			b_tile[ty][tx]  = Be[b_idx];
			at_tile[ty][tx] = Ae[at_idx];
			__syncthreads();

			// 32-term inner product; same sequential accumulation order as a
			// manual unroll, so results are bit-identical.
			#pragma unroll
			for (int k = 0; k < 32; ++k)
				atb_tile[ty][tx] += at_tile[k][ty] * b_tile[k][tx];
			__syncthreads();
		}

		// Fold the finished (A^T*B) tile into C through the A tile loaded above.
		#pragma unroll
		for (int k = 0; k < 32; ++k)
			c_tile[ty][tx] += atb_tile[ty][k] * a_tile[k][tx];
		__syncthreads();
	}

	// Write this thread's element of C back to global memory.
	Ce[c_idx] = c_tile[ty][tx];
}

//////////////////////////////////////////////////////////////////////////
////Task 3: calculate the Frobenius norm of a matrix
////The definition of F-norm for a matrix is square root of (the sum of squares of all the matrix elements), i.e., F=sqrt(sum_(A_ij^2))
////See the definition: https://mathworld.wolfram.com/FrobeniusNorm.html
//////////////////////////////////////////////////////////////////////////
////Please write your own kernel function here, and call it in the function Test_F_Norm_On_GPU to test its correctness and performance

// Reference implementation: every thread squares one element and folds it into
// sum[0] with a global atomicAdd. Expects one thread per element (16x16 blocks)
// and *sum zero-initialized by the caller before launch.
__global__ void F_Norm_On_GPU_Lazy(const float* Ae, float* sum)
{
	__shared__ float sq[16][16];
	const int per_block = blockDim.y * blockDim.x;
	const int idx = blockIdx.y * gridDim.x * per_block + threadIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
	const float v = Ae[idx];
	sq[threadIdx.y][threadIdx.x] = v * v;
	atomicAdd(&sum[0], sq[threadIdx.y][threadIdx.x]);
}

// Reduction pass for the squared sum. Each thread combines two elements a
// blockDim.x apart (squaring them only when round1 is set), then the block
// tree-reduces in dynamic shared memory; thread 0 stores the block's partial
// into Be[blockIdx.x]. Requires blockDim.x to be a power of two and dynamic
// shared memory of blockDim.x floats.
__global__ void F_Norm_On_GPU(const float* Ae, float* Be, bool round1)
{
	extern __shared__ float partial[];

	// Two coalesced loads per thread, offset by a whole block width.
	const int base = blockIdx.x * blockDim.x * 2 + threadIdx.x;
	float lo = Ae[base];
	float hi = Ae[base + blockDim.x];

	// Square only on the first pass; the second pass sums already-squared partials.
	if (round1)
	{
		lo *= lo;
		hi *= hi;
	}
	partial[threadIdx.x] = lo + hi;
	__syncthreads();

	// Sequential-addressing tree reduction (cf. classic reduce4).
	for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
	{
		if (threadIdx.x < stride)
			partial[threadIdx.x] += partial[threadIdx.x + stride];
		__syncthreads();
	}

	if (threadIdx.x == 0)
		Be[blockIdx.x] = partial[0];
}
////Congratulations, your tasks are all finished!
//////////////////////////////////////////////////////////////////////////
////Here are the test functions for your three kernel implementations

// Shared result log: each test appends "T<i>: <time>" and main() appends the
// matching "R<i>: <result>" line.
ofstream out;

// Runs and times the AB multiplication kernel on the GPU, then copies C back.
// A is m x n, B is n x p, C receives the m x p product; dimensions must be
// multiples of the 32x32 block size used below.
__host__ void Test_Matrix_Multiplication_AB_On_GPU(const Matrix& A,const Matrix& B,Matrix& C)
{
	//// Load A and B to device memory
	Matrix A_on_dev(A.m,A.n,false);
	A_on_dev=A;
	Matrix B_on_dev(B.m,B.n,false);
	B_on_dev=B;

	//// Allocate C in device memory
	Matrix C_on_dev(A_on_dev.m,B_on_dev.n,false);

	cudaEvent_t start,end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	float gpu_time=0.0f;
	cudaDeviceSynchronize();
	cudaEventRecord(start);

	//// Invoke kernel: one 32x32 thread block per 32x32 tile of C
	const int block_size=32;
	const int block_num_x=C.m/block_size;
	const int block_num_y=C.n/block_size;

#ifdef POORMAN
	Matrix_Multiplication_AB_Kernel_Poorman<<<dim3(block_num_x,block_num_y),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n,B_on_dev.n);
#endif
#ifndef POORMAN
	// NOTE: grid dims deliberately swapped relative to the Poorman launch; the
	// tiled kernel indexes blockIdx accordingly.
	Matrix_Multiplication_AB_Kernel_Your_Version<<<dim3(block_num_y,block_num_x),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n,B_on_dev.n);
#endif

	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&gpu_time,start,end);
	printf("\nGPU runtime for matrix multiplication AB: %.4f ms\n",gpu_time);
	cudaEventDestroy(start);
	cudaEventDestroy(end);

	//// Transfer data back to CPU
	C=C_on_dev;

	out<<"T1: "<<gpu_time<<endl;
}

// Runs and times the A^T*B*A kernel on the GPU, then copies C back.
// A is Am x An, B is Am x Am, C receives the An x An result.
__host__ void Test_Matrix_Multiplication_ATBA_On_GPU(const Matrix& A,const Matrix& B,Matrix& C)
{
	//// Load A and B to device memory
	Matrix A_on_dev(A.m,A.n,false);
	A_on_dev=A;
	Matrix B_on_dev(B.m,B.n,false);
	B_on_dev=B;

	//// Allocate C in device memory (An x An)
	Matrix C_on_dev(A_on_dev.n,A_on_dev.n,false);

	cudaEvent_t start,end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	float gpu_time=0.0f;
	cudaDeviceSynchronize();
	cudaEventRecord(start);

	//// Invoke kernel
	const int block_size=32;
	const int block_num_x=C.m/block_size;
	const int block_num_y=C.n/block_size;

#ifdef POORMAN
	Matrix_Multiplication_ATBA_Kernel_Poorman<<<dim3(block_num_x,block_num_y),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n);
#endif
#ifndef POORMAN
	////NOTICE: You do not have to use the block_size I specified here. You may customize the size of your grid and blocks for better performance.
	Matrix_Multiplication_ATBA_Kernel_Your_Version<<<dim3(block_num_y,block_num_x),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n);
#endif

	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&gpu_time,start,end);
	printf("\nGPU runtime for matrix multiplication ATBA: %.4f ms\n",gpu_time);
	cudaEventDestroy(start);
	cudaEventDestroy(end);

	//// Transfer data back to CPU
	C=C_on_dev;

	out<<"T2: "<<gpu_time<<endl;
}

// Runs and times the Frobenius-norm computation on the GPU and stores the
// result in norm. POORMAN uses the atomic-add kernel; otherwise a two-pass
// parallel reduction (one block per row, then one block over the row partials).
__host__ void Test_Matrix_F_Norm_On_GPU(const Matrix& A, float& norm)
{
	//// Load A to device memory
	Matrix A_on_dev(A.m,A.n,false);
	A_on_dev=A;

	cudaEvent_t start,end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	float gpu_time=0.0f;
	cudaDeviceSynchronize();
	cudaEventRecord(start);

#ifdef POORMAN // atomic add
	//// Invoke kernel: one thread per element, 16x16 blocks
	const int block_size=16;
	const int block_num_x=A.n/block_size;
	const int block_num_y=A.m/block_size;
	float *sum_dev = nullptr;
	cudaMalloc((void**)&sum_dev, sizeof(float));
	// BUGFIX: cudaMalloc leaves memory uninitialized, and the lazy kernel only
	// atomicAdds into sum_dev[0] — the accumulator must start at zero.
	cudaMemset(sum_dev, 0, sizeof(float));
	F_Norm_On_GPU_Lazy<<<dim3(block_num_x,block_num_y), dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev, sum_dev);
	float *sum_host = (float *)malloc(sizeof(float)); // was malloc(4): spell out sizeof for portability
	cudaMemcpy(sum_host, sum_dev, sizeof(float), cudaMemcpyDeviceToHost);
	cudaFree(sum_dev);
	norm = sqrt(*sum_host);
	free(sum_host);
#endif // ifdef

#ifndef POORMAN // parallel reduction
	// Round 1: one block per row squares and reduces its A.n elements to one
	// partial sum. Round 2: a single block reduces the A.m partials in place.
	const int r1_blocks = A.m;
	const int r1_threads = A.n / 2;
	const int r2_threads = A.m / 2;
	float *B_dev = nullptr;
	cudaMalloc((void**)&B_dev, A.m * sizeof(float));
	F_Norm_On_GPU<<<r1_blocks, r1_threads, r1_threads*sizeof(float)>>>(A_on_dev.elements_on_dev, B_dev, true);
	F_Norm_On_GPU<<<1, r2_threads, r2_threads*sizeof(float)>>>(B_dev, B_dev, false);
	float result = 0;
	cudaMemcpy(&result,B_dev,sizeof(float),cudaMemcpyDeviceToHost);
	norm = sqrt(result);
	cudaFree(B_dev);
#endif // ifndef

	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&gpu_time,start,end);
	printf("\nGPU runtime for F norm: %.4f ms\n",gpu_time);
	cudaEventDestroy(start);
	cudaEventDestroy(end);

	out<<"T3: "<<gpu_time<<endl;
}

// Entry point: builds all-ones test matrices, runs the three GPU tests, and
// logs a representative element / the norm next to each timing.
int main()
{
	if(name::team=="Team_X"){
		printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
		return 0;
	}

	std::string file_name=name::team+"_competition_1_matrix.dat";
	out.open(file_name.c_str());
	if(out.fail()){
		printf("\ncannot open file %s to record results\n",file_name.c_str());
		return 0;
	}

	//////////////////////////////////////////////////////////////////////////
	////NOTICE: We may use a different set of parameters to evaluate your code.
	////So please test your functions with different size and initial values.
	//////////////////////////////////////////////////////////////////////////
	const int m=512;
	const int n=2048;
	const int p=1024;

	Matrix h_A(m,n);
	for(int i=0;i<m;i++){
		for(int j=0;j<n;j++){
			h_A(i,j) = 1;
		}
	}

	Matrix h_B(n,p);
	for(int i=0;i<n;i++){
		for(int j=0;j<p;j++){
			h_B(i,j) = 1;
		}
	}

	Matrix h_C(m,p);

	Matrix h_B2(m,m);
	for(int i=0;i<m;i++){
		for(int j=0;j<m;j++){
			h_B2(i,j) = 1;
		}
	}

	Matrix h_C2(n,n);

	Test_Matrix_Multiplication_AB_On_GPU(h_A,h_B,h_C);
	cout<<"AB result: "<<h_C(h_C.m/2,h_C.n/2)<<endl;
	out<<"R1: "<<h_C(h_C.m/2,h_C.n/2)<<endl;

	Test_Matrix_Multiplication_ATBA_On_GPU(h_A,h_B2,h_C2);
	cout<<"ATBA result: "<<h_C2(h_C2.m/3,h_C2.n/3)<<endl;
	out<<"R2: "<<h_C2(h_C2.m/3,h_C2.n/3)<<endl;

	float f_norm=0.f;
	Test_Matrix_F_Norm_On_GPU(h_A,f_norm);
	cout<<"F-norm result: "<<f_norm<<endl;
	out<<"R3: "<<f_norm<<endl;

	return 0;
}
06d15053e47e0c5674fad2ea5ad540beba1e896c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file cuda_uspp_kernels.cu * * \brief CUDA kernel for the PW-PW method. 
*/ #include "../SDDK/GPU/cuda_common.hpp" #include "../SDDK/GPU/acc_runtime.hpp" extern acc_stream_t* streams; __global__ void compute_chebyshev_order1_gpu_kernel ( int num_gkvec__, double c__, double r__, acc_complex_double_t* phi0__, acc_complex_double_t* phi1__ ) { int igk = blockDim.x * blockIdx.x + threadIdx.x; int j = blockIdx.y; if (igk < num_gkvec__) { int i = array2D_offset(igk, j, num_gkvec__); // phi0 * c acc_complex_double_t z1 = accCmul(phi0__[i], make_accDoubleComplex(c__, 0)); // phi1 - phi0 * c acc_complex_double_t z2 = accCsub(phi1__[i], z1); // (phi1 - phi0 * c) / r phi1__[i] = accCdiv(z2, make_accDoubleComplex(r__, 0)); } } __global__ void compute_chebyshev_orderk_gpu_kernel ( int num_gkvec__, double c__, double r__, acc_complex_double_t* phi0__, acc_complex_double_t* phi1__, acc_complex_double_t* phi2__ ) { int igk = blockDim.x * blockIdx.x + threadIdx.x; int j = blockIdx.y; if (igk < num_gkvec__) { int i = array2D_offset(igk, j, num_gkvec__); // phi1 * c acc_complex_double_t z1 = accCmul(phi1__[i], make_accDoubleComplex(c__, 0)); // phi2 - phi1 * c acc_complex_double_t z2 = accCsub(phi2__[i], z1); // (phi2 - phi1 * c) * 2 / r acc_complex_double_t z3 = accCmul(z2, make_accDoubleComplex(2.0 / r__, 0)); // (phi2 - phi1 * c) * 2 / r - phi0 phi2__[i] = accCsub(z3, phi0__[i]); } } extern "C" void compute_chebyshev_polynomial_gpu(int num_gkvec, int n, double c, double r, acc_complex_double_t* phi0, acc_complex_double_t* phi1, acc_complex_double_t* phi2) { dim3 grid_t(64); dim3 grid_b(num_blocks(num_gkvec, grid_t.x), n); if (phi2 == NULL) { accLaunchKernel((compute_chebyshev_order1_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0, num_gkvec, c, r, phi0, phi1 ); } else { accLaunchKernel((compute_chebyshev_orderk_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0, num_gkvec, c, r, phi0, phi1, phi2 ); } } //== #define BLOCK_SIZE 32 //== //== __global__ void generate_beta_phi_gpu_kernel(int num_gkvec, //== int num_beta, //== int num_phi, //== int* beta_t_idx, //== 
double* atom_pos, //== double* gkvec, //== acc_complex_double_t* beta_pw_type, //== acc_complex_double_t* phi, //== acc_complex_double_t* beta_phi) //== { //== int idx_beta = blockDim.x * blockIdx.x + threadIdx.x; //== int idx_phi = blockDim.y * blockIdx.y + threadIdx.y; //== int ia, offset_t; //== double x0, y0, z0; //== //== if (idx_beta < num_beta) //== { //== ia = beta_t_idx[array2D_offset(0, idx_beta, 2)]; //== offset_t = beta_t_idx[array2D_offset(1, idx_beta, 2)]; //== x0 = atom_pos[array2D_offset(0, ia, 3)]; //== y0 = atom_pos[array2D_offset(1, ia, 3)]; //== z0 = atom_pos[array2D_offset(2, ia, 3)]; //== } //== //== int N = num_blocks(num_gkvec, BLOCK_SIZE); //== //== acc_complex_double_t val = make_accDoubleComplex(0.0, 0.0); //== //== for (int m = 0; m < N; m++) //== { //== __shared__ acc_complex_double_t beta_pw_tile[BLOCK_SIZE][BLOCK_SIZE]; //== __shared__ acc_complex_double_t phi_tile[BLOCK_SIZE][BLOCK_SIZE]; //== //== int bs = (m + 1) * BLOCK_SIZE > num_gkvec ? num_gkvec - m * BLOCK_SIZE : BLOCK_SIZE; //== //== int igk = m * BLOCK_SIZE + threadIdx.y; //== //== if (igk < num_gkvec && idx_beta < num_beta) //== { //== double x1 = gkvec[array2D_offset(igk, 0, num_gkvec)]; //== double y1 = gkvec[array2D_offset(igk, 1, num_gkvec)]; //== double z1 = gkvec[array2D_offset(igk, 2, num_gkvec)]; //== //== double p = twopi * (x0 * x1 + y0 * y1 + z0 * z1); //== double sinp = sin(p); //== double cosp = cos(p); //== //== beta_pw_tile[threadIdx.x][threadIdx.y] = accCmul(accConj(beta_pw_type[array2D_offset(igk, offset_t, num_gkvec)]), //== make_accDoubleComplex(cosp, sinp)); //== //== } //== //== igk = m * BLOCK_SIZE + threadIdx.x; //== //== if (igk < num_gkvec && idx_phi < num_phi) //== phi_tile[threadIdx.y][threadIdx.x] = phi[array2D_offset(igk, idx_phi, num_gkvec)]; //== //== __syncthreads(); //== //== for (int i = 0; i < bs; i++) val = accCadd(val, accCmul(beta_pw_tile[threadIdx.x][i], phi_tile[threadIdx.y][i])); //== //== __syncthreads(); //== } //== //== if 
(idx_beta < num_beta && idx_phi < num_phi) beta_phi[array2D_offset(idx_beta, idx_phi, num_beta)] = val; //== } //== //== //== extern "C" void generate_beta_phi_gpu(int num_gkvec, //== int num_beta, //== int num_phi, //== int* beta_t_idx, //== double* atom_pos, //== double* gkvec, //== void* beta_pw_type, //== void* phi, //== void* beta_phi) //== { //== //== dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); //== dim3 numBlocks(num_blocks(num_beta, BLOCK_SIZE), num_blocks(num_phi, BLOCK_SIZE)); //== //== accLaunchKernel((generate_beta_phi_gpu_kernel), dim3(//== numBlocks), dim3(//== threadsPerBlock), 0, 0, num_gkvec, //== num_beta, //== num_phi, //== beta_t_idx, //== atom_pos, //== gkvec, //== (acc_complex_double_t*)beta_pw_type, //== (acc_complex_double_t*)phi, //== (acc_complex_double_t*)beta_phi); //== } //__global__ void copy_beta_psi_gpu_kernel //( // acc_complex_double_t const* beta_psi, // int beta_psi_ld, // double const* wo, // acc_complex_double_t* beta_psi_wo, // int beta_psi_wo_ld //) //{ // int xi = threadIdx.x; // int j = blockIdx.x; // // beta_psi_wo[array2D_offset(xi, j, beta_psi_wo_ld)] = accCmul(accConj(beta_psi[array2D_offset(xi, j, beta_psi_ld)]), // make_accDoubleComplex(wo[j], 0.0)); //} //extern "C" void copy_beta_psi_gpu(int nbf, // int nloc, // acc_complex_double_t const* beta_psi, // int beta_psi_ld, // double const* wo, // acc_complex_double_t* beta_psi_wo, // int beta_psi_wo_ld, // int stream_id) //{ // dim3 grid_t(nbf); // dim3 grid_b(nloc); // // acc_stream_t stream = (stream_id == -1) ? 
NULL : streams[stream_id]; // // copy_beta_psi_gpu_kernel <<<grid_b, grid_t, 0, stream>>> // ( // beta_psi, // beta_psi_ld, // wo, // beta_psi_wo, // beta_psi_wo_ld // ); //} __global__ void compute_inner_product_gpu_kernel ( int num_gkvec_row, acc_complex_double_t const* f1, acc_complex_double_t const* f2, double* prod ) { int N = num_blocks(num_gkvec_row, blockDim.x); ACC_DYNAMIC_SHARED( char, sdata_ptr) double* sdata = (double*)&sdata_ptr[0]; sdata[threadIdx.x] = 0.0; for (int n = 0; n < N; n++) { int igk = n * blockDim.x + threadIdx.x; if (igk < num_gkvec_row) { int k = array2D_offset(igk, blockIdx.x, num_gkvec_row); sdata[threadIdx.x] += f1[k].x * f2[k].x + f1[k].y *f2[k].y; } } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (threadIdx.x % (2 * s) == 0) sdata[threadIdx.x] = sdata[threadIdx.x] + sdata[threadIdx.x + s]; __syncthreads(); } prod[blockIdx.x] = sdata[0]; } extern "C" void compute_inner_product_gpu(int num_gkvec_row, int n, acc_complex_double_t const* f1, acc_complex_double_t const* f2, double* prod) { dim3 grid_t(64); dim3 grid_b(n); accLaunchKernel((compute_inner_product_gpu_kernel), dim3(grid_b), dim3(grid_t), grid_t.x * sizeof(double), 0, num_gkvec_row, f1, f2, prod ); } __global__ void add_checksum_gpu_kernel ( acc_complex_double_t const* wf__, int num_rows_loc__, acc_complex_double_t* result__ ) { int N = num_blocks(num_rows_loc__, blockDim.x); ACC_DYNAMIC_SHARED( char, sdata_ptr) double* sdata_x = (double*)&sdata_ptr[0]; double* sdata_y = (double*)&sdata_ptr[blockDim.x * sizeof(double)]; sdata_x[threadIdx.x] = 0.0; sdata_y[threadIdx.x] = 0.0; for (int n = 0; n < N; n++) { int j = n * blockDim.x + threadIdx.x; if (j < num_rows_loc__) { int k = array2D_offset(j, blockIdx.x, num_rows_loc__); sdata_x[threadIdx.x] += wf__[k].x; sdata_y[threadIdx.x] += wf__[k].y; } } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (threadIdx.x % (2 * s) == 0) { sdata_x[threadIdx.x] = sdata_x[threadIdx.x] + sdata_x[threadIdx.x + s]; 
sdata_y[threadIdx.x] = sdata_y[threadIdx.x] + sdata_y[threadIdx.x + s]; } __syncthreads(); } result__[blockIdx.x] = accCadd(result__[blockIdx.x], make_accDoubleComplex(sdata_x[0], sdata_y[0])); } extern "C" void add_checksum_gpu(acc_complex_double_t* wf__, int num_rows_loc__, int nwf__, acc_complex_double_t* result__) { dim3 grid_t(64); dim3 grid_b(nwf__); accLaunchKernel((add_checksum_gpu_kernel), dim3(grid_b), dim3(grid_t), 2 * grid_t.x * sizeof(double), 0, wf__, num_rows_loc__, result__ ); }
06d15053e47e0c5674fad2ea5ad540beba1e896c.cu
// Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file cuda_uspp_kernels.cu * * \brief CUDA kernel for the PW-PW method. 
*/ #include "../SDDK/GPU/cuda_common.hpp" #include "../SDDK/GPU/acc_runtime.hpp" extern acc_stream_t* streams; __global__ void compute_chebyshev_order1_gpu_kernel ( int num_gkvec__, double c__, double r__, acc_complex_double_t* phi0__, acc_complex_double_t* phi1__ ) { int igk = blockDim.x * blockIdx.x + threadIdx.x; int j = blockIdx.y; if (igk < num_gkvec__) { int i = array2D_offset(igk, j, num_gkvec__); // phi0 * c acc_complex_double_t z1 = accCmul(phi0__[i], make_accDoubleComplex(c__, 0)); // phi1 - phi0 * c acc_complex_double_t z2 = accCsub(phi1__[i], z1); // (phi1 - phi0 * c) / r phi1__[i] = accCdiv(z2, make_accDoubleComplex(r__, 0)); } } __global__ void compute_chebyshev_orderk_gpu_kernel ( int num_gkvec__, double c__, double r__, acc_complex_double_t* phi0__, acc_complex_double_t* phi1__, acc_complex_double_t* phi2__ ) { int igk = blockDim.x * blockIdx.x + threadIdx.x; int j = blockIdx.y; if (igk < num_gkvec__) { int i = array2D_offset(igk, j, num_gkvec__); // phi1 * c acc_complex_double_t z1 = accCmul(phi1__[i], make_accDoubleComplex(c__, 0)); // phi2 - phi1 * c acc_complex_double_t z2 = accCsub(phi2__[i], z1); // (phi2 - phi1 * c) * 2 / r acc_complex_double_t z3 = accCmul(z2, make_accDoubleComplex(2.0 / r__, 0)); // (phi2 - phi1 * c) * 2 / r - phi0 phi2__[i] = accCsub(z3, phi0__[i]); } } extern "C" void compute_chebyshev_polynomial_gpu(int num_gkvec, int n, double c, double r, acc_complex_double_t* phi0, acc_complex_double_t* phi1, acc_complex_double_t* phi2) { dim3 grid_t(64); dim3 grid_b(num_blocks(num_gkvec, grid_t.x), n); if (phi2 == NULL) { accLaunchKernel((compute_chebyshev_order1_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0, num_gkvec, c, r, phi0, phi1 ); } else { accLaunchKernel((compute_chebyshev_orderk_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0, num_gkvec, c, r, phi0, phi1, phi2 ); } } //== #define BLOCK_SIZE 32 //== //== __global__ void generate_beta_phi_gpu_kernel(int num_gkvec, //== int num_beta, //== int num_phi, //== int* beta_t_idx, //== 
double* atom_pos, //== double* gkvec, //== acc_complex_double_t* beta_pw_type, //== acc_complex_double_t* phi, //== acc_complex_double_t* beta_phi) //== { //== int idx_beta = blockDim.x * blockIdx.x + threadIdx.x; //== int idx_phi = blockDim.y * blockIdx.y + threadIdx.y; //== int ia, offset_t; //== double x0, y0, z0; //== //== if (idx_beta < num_beta) //== { //== ia = beta_t_idx[array2D_offset(0, idx_beta, 2)]; //== offset_t = beta_t_idx[array2D_offset(1, idx_beta, 2)]; //== x0 = atom_pos[array2D_offset(0, ia, 3)]; //== y0 = atom_pos[array2D_offset(1, ia, 3)]; //== z0 = atom_pos[array2D_offset(2, ia, 3)]; //== } //== //== int N = num_blocks(num_gkvec, BLOCK_SIZE); //== //== acc_complex_double_t val = make_accDoubleComplex(0.0, 0.0); //== //== for (int m = 0; m < N; m++) //== { //== __shared__ acc_complex_double_t beta_pw_tile[BLOCK_SIZE][BLOCK_SIZE]; //== __shared__ acc_complex_double_t phi_tile[BLOCK_SIZE][BLOCK_SIZE]; //== //== int bs = (m + 1) * BLOCK_SIZE > num_gkvec ? num_gkvec - m * BLOCK_SIZE : BLOCK_SIZE; //== //== int igk = m * BLOCK_SIZE + threadIdx.y; //== //== if (igk < num_gkvec && idx_beta < num_beta) //== { //== double x1 = gkvec[array2D_offset(igk, 0, num_gkvec)]; //== double y1 = gkvec[array2D_offset(igk, 1, num_gkvec)]; //== double z1 = gkvec[array2D_offset(igk, 2, num_gkvec)]; //== //== double p = twopi * (x0 * x1 + y0 * y1 + z0 * z1); //== double sinp = sin(p); //== double cosp = cos(p); //== //== beta_pw_tile[threadIdx.x][threadIdx.y] = accCmul(accConj(beta_pw_type[array2D_offset(igk, offset_t, num_gkvec)]), //== make_accDoubleComplex(cosp, sinp)); //== //== } //== //== igk = m * BLOCK_SIZE + threadIdx.x; //== //== if (igk < num_gkvec && idx_phi < num_phi) //== phi_tile[threadIdx.y][threadIdx.x] = phi[array2D_offset(igk, idx_phi, num_gkvec)]; //== //== __syncthreads(); //== //== for (int i = 0; i < bs; i++) val = accCadd(val, accCmul(beta_pw_tile[threadIdx.x][i], phi_tile[threadIdx.y][i])); //== //== __syncthreads(); //== } //== //== if 
(idx_beta < num_beta && idx_phi < num_phi) beta_phi[array2D_offset(idx_beta, idx_phi, num_beta)] = val; //== } //== //== //== extern "C" void generate_beta_phi_gpu(int num_gkvec, //== int num_beta, //== int num_phi, //== int* beta_t_idx, //== double* atom_pos, //== double* gkvec, //== void* beta_pw_type, //== void* phi, //== void* beta_phi) //== { //== //== dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); //== dim3 numBlocks(num_blocks(num_beta, BLOCK_SIZE), num_blocks(num_phi, BLOCK_SIZE)); //== //== accLaunchKernel((generate_beta_phi_gpu_kernel), dim3(//== numBlocks), dim3(//== threadsPerBlock), 0, 0, num_gkvec, //== num_beta, //== num_phi, //== beta_t_idx, //== atom_pos, //== gkvec, //== (acc_complex_double_t*)beta_pw_type, //== (acc_complex_double_t*)phi, //== (acc_complex_double_t*)beta_phi); //== } //__global__ void copy_beta_psi_gpu_kernel //( // acc_complex_double_t const* beta_psi, // int beta_psi_ld, // double const* wo, // acc_complex_double_t* beta_psi_wo, // int beta_psi_wo_ld //) //{ // int xi = threadIdx.x; // int j = blockIdx.x; // // beta_psi_wo[array2D_offset(xi, j, beta_psi_wo_ld)] = accCmul(accConj(beta_psi[array2D_offset(xi, j, beta_psi_ld)]), // make_accDoubleComplex(wo[j], 0.0)); //} //extern "C" void copy_beta_psi_gpu(int nbf, // int nloc, // acc_complex_double_t const* beta_psi, // int beta_psi_ld, // double const* wo, // acc_complex_double_t* beta_psi_wo, // int beta_psi_wo_ld, // int stream_id) //{ // dim3 grid_t(nbf); // dim3 grid_b(nloc); // // acc_stream_t stream = (stream_id == -1) ? 
NULL : streams[stream_id]; // // copy_beta_psi_gpu_kernel <<<grid_b, grid_t, 0, stream>>> // ( // beta_psi, // beta_psi_ld, // wo, // beta_psi_wo, // beta_psi_wo_ld // ); //} __global__ void compute_inner_product_gpu_kernel ( int num_gkvec_row, acc_complex_double_t const* f1, acc_complex_double_t const* f2, double* prod ) { int N = num_blocks(num_gkvec_row, blockDim.x); ACC_DYNAMIC_SHARED( char, sdata_ptr) double* sdata = (double*)&sdata_ptr[0]; sdata[threadIdx.x] = 0.0; for (int n = 0; n < N; n++) { int igk = n * blockDim.x + threadIdx.x; if (igk < num_gkvec_row) { int k = array2D_offset(igk, blockIdx.x, num_gkvec_row); sdata[threadIdx.x] += f1[k].x * f2[k].x + f1[k].y *f2[k].y; } } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (threadIdx.x % (2 * s) == 0) sdata[threadIdx.x] = sdata[threadIdx.x] + sdata[threadIdx.x + s]; __syncthreads(); } prod[blockIdx.x] = sdata[0]; } extern "C" void compute_inner_product_gpu(int num_gkvec_row, int n, acc_complex_double_t const* f1, acc_complex_double_t const* f2, double* prod) { dim3 grid_t(64); dim3 grid_b(n); accLaunchKernel((compute_inner_product_gpu_kernel), dim3(grid_b), dim3(grid_t), grid_t.x * sizeof(double), 0, num_gkvec_row, f1, f2, prod ); } __global__ void add_checksum_gpu_kernel ( acc_complex_double_t const* wf__, int num_rows_loc__, acc_complex_double_t* result__ ) { int N = num_blocks(num_rows_loc__, blockDim.x); ACC_DYNAMIC_SHARED( char, sdata_ptr) double* sdata_x = (double*)&sdata_ptr[0]; double* sdata_y = (double*)&sdata_ptr[blockDim.x * sizeof(double)]; sdata_x[threadIdx.x] = 0.0; sdata_y[threadIdx.x] = 0.0; for (int n = 0; n < N; n++) { int j = n * blockDim.x + threadIdx.x; if (j < num_rows_loc__) { int k = array2D_offset(j, blockIdx.x, num_rows_loc__); sdata_x[threadIdx.x] += wf__[k].x; sdata_y[threadIdx.x] += wf__[k].y; } } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (threadIdx.x % (2 * s) == 0) { sdata_x[threadIdx.x] = sdata_x[threadIdx.x] + sdata_x[threadIdx.x + s]; 
sdata_y[threadIdx.x] = sdata_y[threadIdx.x] + sdata_y[threadIdx.x + s]; } __syncthreads(); } result__[blockIdx.x] = accCadd(result__[blockIdx.x], make_accDoubleComplex(sdata_x[0], sdata_y[0])); } extern "C" void add_checksum_gpu(acc_complex_double_t* wf__, int num_rows_loc__, int nwf__, acc_complex_double_t* result__) { dim3 grid_t(64); dim3 grid_b(nwf__); accLaunchKernel((add_checksum_gpu_kernel), dim3(grid_b), dim3(grid_t), 2 * grid_t.x * sizeof(double), 0, wf__, num_rows_loc__, result__ ); }
23ce3bab90776b90a93e50db858978e1038067da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gpuinflate.h" #include <io/utilities/block_utils.cuh> namespace cudf { namespace io { // Not supporting streams longer than this (not what snappy is intended for) #define SNAPPY_MAX_STREAM_SIZE 0x7fffffff #define LOG2_BATCH_SIZE 5 #define BATCH_SIZE (1 << LOG2_BATCH_SIZE) #define LOG2_BATCH_COUNT 2 #define BATCH_COUNT (1 << LOG2_BATCH_COUNT) #define LOG2_PREFETCH_SIZE 9 #define PREFETCH_SIZE (1 << LOG2_PREFETCH_SIZE) // 512B, in 32B chunks #define LOG_CYCLECOUNT 0 struct unsnap_batch_s { int32_t len; // 1..64 = Number of bytes uint32_t offset; // copy distance if greater than zero or negative of literal offset in byte stream }; struct unsnap_queue_s { uint32_t prefetch_wrpos; uint32_t prefetch_rdpos; int32_t prefetch_end; int32_t batch_len[BATCH_COUNT]; // Length of each batch - <0:end, 0:not ready, >0:symbol count unsnap_batch_s batch[BATCH_COUNT * BATCH_SIZE]; uint8_t buf[PREFETCH_SIZE]; // Prefetch buffer }; struct unsnap_state_s { const uint8_t *base; const uint8_t *end; uint32_t uncompressed_size; uint32_t bytes_left; int32_t error; uint32_t tstart; volatile unsnap_queue_s q; gpu_inflate_input_s in; }; __device__ void snappy_prefetch_bytestream(unsnap_state_s *s, int t) { const uint8_t *base = s->base; uint32_t end = (uint32_t)(s->end - base); uint32_t align_bytes = (uint32_t)(0x20 - 
(0x1f & reinterpret_cast<uintptr_t>(base))); int32_t pos = min(align_bytes, end); int32_t blen; // Start by prefetching up to the next a 32B-aligned location if (t < pos) { s->q.buf[t] = base[t]; } blen = 0; do { SYNCWARP(); if (!t) { uint32_t minrdpos; s->q.prefetch_wrpos = pos; minrdpos = pos - min(pos, PREFETCH_SIZE - 32u); blen = (int)min(32u, end - pos); for (;;) { uint32_t rdpos = s->q.prefetch_rdpos; if (rdpos >= minrdpos) break; if (s->q.prefetch_end) { blen = 0; break; } NANOSLEEP(100); } } blen = SHFL0(blen); if (t < blen) { s->q.buf[(pos + t) & (PREFETCH_SIZE - 1)] = base[pos + t]; } pos += blen; } while (blen > 0); } /* * @brief Lookup table for get_len3_mask() * * Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of * 3-byte code lengths in the lower 4 bits, along with the total number of * bytes used for coding the four lengths in the upper 4 bits. * The upper 4-bit value could also be obtained by 8+__popc(mask4) * * for (uint32_t k = 0; k < 1024; k++) * { * for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++) * { * v |= (b & 1) << i; * n += (b & 1) + 2; * b >>= (b & 1) + 2; * } * k_len3lut[k] = v | (n << 4); * } * */ static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = { 0x80,0x91,0x80,0x91,0x92,0x91,0x92,0x91,0x80,0xa3,0x80,0xa3,0x92,0xa3,0x92,0xa3, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xa3,0x94,0xa3,0x92,0xa3,0x92,0xa3, 0x80,0xa5,0x80,0xa5,0xa6,0xa5,0xa6,0xa5,0x80,0xa3,0x80,0xa3,0xa6,0xa3,0xa6,0xa3, 0x94,0xa5,0x94,0xa5,0xa6,0xa5,0xa6,0xa5,0x94,0xa3,0x94,0xa3,0xa6,0xa3,0xa6,0xa3, 0x98,0x91,0x98,0x91,0x92,0x91,0x92,0x91,0x98,0xb7,0x98,0xb7,0x92,0xb7,0x92,0xb7, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xb7,0x94,0xb7,0x92,0xb7,0x92,0xb7, 0x98,0xa5,0x98,0xa5,0xa6,0xa5,0xa6,0xa5,0x98,0xb7,0x98,0xb7,0xa6,0xb7,0xa6,0xb7, 0x94,0xa5,0x94,0xa5,0xa6,0xa5,0xa6,0xa5,0x94,0xb7,0x94,0xb7,0xa6,0xb7,0xa6,0xb7, 0x80,0xa9,0x80,0xa9,0xaa,0xa9,0xaa,0xa9,0x80,0xa3,0x80,0xa3,0xaa,0xa3,0xaa,0xa3, 
0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xa3,0xac,0xa3,0xaa,0xa3,0xaa,0xa3, 0x80,0xa5,0x80,0xa5,0xa6,0xa5,0xa6,0xa5,0x80,0xa3,0x80,0xa3,0xa6,0xa3,0xa6,0xa3, 0xac,0xa5,0xac,0xa5,0xa6,0xa5,0xa6,0xa5,0xac,0xa3,0xac,0xa3,0xa6,0xa3,0xa6,0xa3, 0x98,0xa9,0x98,0xa9,0xaa,0xa9,0xaa,0xa9,0x98,0xb7,0x98,0xb7,0xaa,0xb7,0xaa,0xb7, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xb7,0xac,0xb7,0xaa,0xb7,0xaa,0xb7, 0x98,0xa5,0x98,0xa5,0xa6,0xa5,0xa6,0xa5,0x98,0xb7,0x98,0xb7,0xa6,0xb7,0xa6,0xb7, 0xac,0xa5,0xac,0xa5,0xa6,0xa5,0xa6,0xa5,0xac,0xb7,0xac,0xb7,0xa6,0xb7,0xa6,0xb7, 0x80,0x91,0x80,0x91,0x92,0x91,0x92,0x91,0x80,0xbb,0x80,0xbb,0x92,0xbb,0x92,0xbb, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xbb,0x94,0xbb,0x92,0xbb,0x92,0xbb, 0x80,0xbd,0x80,0xbd,0xbe,0xbd,0xbe,0xbd,0x80,0xbb,0x80,0xbb,0xbe,0xbb,0xbe,0xbb, 0x94,0xbd,0x94,0xbd,0xbe,0xbd,0xbe,0xbd,0x94,0xbb,0x94,0xbb,0xbe,0xbb,0xbe,0xbb, 0x98,0x91,0x98,0x91,0x92,0x91,0x92,0x91,0x98,0xb7,0x98,0xb7,0x92,0xb7,0x92,0xb7, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xb7,0x94,0xb7,0x92,0xb7,0x92,0xb7, 0x98,0xbd,0x98,0xbd,0xbe,0xbd,0xbe,0xbd,0x98,0xb7,0x98,0xb7,0xbe,0xb7,0xbe,0xb7, 0x94,0xbd,0x94,0xbd,0xbe,0xbd,0xbe,0xbd,0x94,0xb7,0x94,0xb7,0xbe,0xb7,0xbe,0xb7, 0x80,0xa9,0x80,0xa9,0xaa,0xa9,0xaa,0xa9,0x80,0xbb,0x80,0xbb,0xaa,0xbb,0xaa,0xbb, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xbb,0xac,0xbb,0xaa,0xbb,0xaa,0xbb, 0x80,0xbd,0x80,0xbd,0xbe,0xbd,0xbe,0xbd,0x80,0xbb,0x80,0xbb,0xbe,0xbb,0xbe,0xbb, 0xac,0xbd,0xac,0xbd,0xbe,0xbd,0xbe,0xbd,0xac,0xbb,0xac,0xbb,0xbe,0xbb,0xbe,0xbb, 0x98,0xa9,0x98,0xa9,0xaa,0xa9,0xaa,0xa9,0x98,0xb7,0x98,0xb7,0xaa,0xb7,0xaa,0xb7, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xb7,0xac,0xb7,0xaa,0xb7,0xaa,0xb7, 0x98,0xbd,0x98,0xbd,0xbe,0xbd,0xbe,0xbd,0x98,0xb7,0x98,0xb7,0xbe,0xb7,0xbe,0xb7, 0xac,0xbd,0xac,0xbd,0xbe,0xbd,0xbe,0xbd,0xac,0xb7,0xac,0xb7,0xbe,0xb7,0xbe,0xb7, 0x80,0x91,0x80,0x91,0x92,0x91,0x92,0x91,0x80,0xa3,0x80,0xa3,0x92,0xa3,0x92,0xa3, 
0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xa3,0x94,0xa3,0x92,0xa3,0x92,0xa3, 0x80,0xa5,0x80,0xa5,0xa6,0xa5,0xa6,0xa5,0x80,0xa3,0x80,0xa3,0xa6,0xa3,0xa6,0xa3, 0x94,0xa5,0x94,0xa5,0xa6,0xa5,0xa6,0xa5,0x94,0xa3,0x94,0xa3,0xa6,0xa3,0xa6,0xa3, 0x98,0x91,0x98,0x91,0x92,0x91,0x92,0x91,0x98,0xcf,0x98,0xcf,0x92,0xcf,0x92,0xcf, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xcf,0x94,0xcf,0x92,0xcf,0x92,0xcf, 0x98,0xa5,0x98,0xa5,0xa6,0xa5,0xa6,0xa5,0x98,0xcf,0x98,0xcf,0xa6,0xcf,0xa6,0xcf, 0x94,0xa5,0x94,0xa5,0xa6,0xa5,0xa6,0xa5,0x94,0xcf,0x94,0xcf,0xa6,0xcf,0xa6,0xcf, 0x80,0xa9,0x80,0xa9,0xaa,0xa9,0xaa,0xa9,0x80,0xa3,0x80,0xa3,0xaa,0xa3,0xaa,0xa3, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xa3,0xac,0xa3,0xaa,0xa3,0xaa,0xa3, 0x80,0xa5,0x80,0xa5,0xa6,0xa5,0xa6,0xa5,0x80,0xa3,0x80,0xa3,0xa6,0xa3,0xa6,0xa3, 0xac,0xa5,0xac,0xa5,0xa6,0xa5,0xa6,0xa5,0xac,0xa3,0xac,0xa3,0xa6,0xa3,0xa6,0xa3, 0x98,0xa9,0x98,0xa9,0xaa,0xa9,0xaa,0xa9,0x98,0xcf,0x98,0xcf,0xaa,0xcf,0xaa,0xcf, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xcf,0xac,0xcf,0xaa,0xcf,0xaa,0xcf, 0x98,0xa5,0x98,0xa5,0xa6,0xa5,0xa6,0xa5,0x98,0xcf,0x98,0xcf,0xa6,0xcf,0xa6,0xcf, 0xac,0xa5,0xac,0xa5,0xa6,0xa5,0xa6,0xa5,0xac,0xcf,0xac,0xcf,0xa6,0xcf,0xa6,0xcf, 0x80,0x91,0x80,0x91,0x92,0x91,0x92,0x91,0x80,0xbb,0x80,0xbb,0x92,0xbb,0x92,0xbb, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xbb,0x94,0xbb,0x92,0xbb,0x92,0xbb, 0x80,0xbd,0x80,0xbd,0xbe,0xbd,0xbe,0xbd,0x80,0xbb,0x80,0xbb,0xbe,0xbb,0xbe,0xbb, 0x94,0xbd,0x94,0xbd,0xbe,0xbd,0xbe,0xbd,0x94,0xbb,0x94,0xbb,0xbe,0xbb,0xbe,0xbb, 0x98,0x91,0x98,0x91,0x92,0x91,0x92,0x91,0x98,0xcf,0x98,0xcf,0x92,0xcf,0x92,0xcf, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xcf,0x94,0xcf,0x92,0xcf,0x92,0xcf, 0x98,0xbd,0x98,0xbd,0xbe,0xbd,0xbe,0xbd,0x98,0xcf,0x98,0xcf,0xbe,0xcf,0xbe,0xcf, 0x94,0xbd,0x94,0xbd,0xbe,0xbd,0xbe,0xbd,0x94,0xcf,0x94,0xcf,0xbe,0xcf,0xbe,0xcf, 0x80,0xa9,0x80,0xa9,0xaa,0xa9,0xaa,0xa9,0x80,0xbb,0x80,0xbb,0xaa,0xbb,0xaa,0xbb, 
0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xbb,0xac,0xbb,0xaa,0xbb,0xaa,0xbb, 0x80,0xbd,0x80,0xbd,0xbe,0xbd,0xbe,0xbd,0x80,0xbb,0x80,0xbb,0xbe,0xbb,0xbe,0xbb, 0xac,0xbd,0xac,0xbd,0xbe,0xbd,0xbe,0xbd,0xac,0xbb,0xac,0xbb,0xbe,0xbb,0xbe,0xbb, 0x98,0xa9,0x98,0xa9,0xaa,0xa9,0xaa,0xa9,0x98,0xcf,0x98,0xcf,0xaa,0xcf,0xaa,0xcf, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xcf,0xac,0xcf,0xaa,0xcf,0xaa,0xcf, 0x98,0xbd,0x98,0xbd,0xbe,0xbd,0xbe,0xbd,0x98,0xcf,0x98,0xcf,0xbe,0xcf,0xbe,0xcf, 0xac,0xbd,0xac,0xbd,0xbe,0xbd,0xbe,0xbd,0xac,0xcf,0xac,0xcf,0xbe,0xcf,0xbe,0xcf }; /* * @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte * code length, given an input mask of up to 96 bits. * * Implemented by doing 8 consecutive lookups, building the result 4-bit at a time */ inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2) { uint32_t m, v, m4, n; v = v0; m4 = k_len3lut[v & 0x3ff]; m = m4 & 0xf; n = m4 >> 4; // 8..12 v = v0 >> n; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 4; n += m4 >> 4; // 16..24 v = __funnelshift_r(v0, v1, n); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 8; n += m4 >> 4; // 24..36 v >>= (m4 >> 4); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 12; n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16 v1 = __funnelshift_r(v1, v2, n); v2 >>= n; v = v1; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 16; n = m4 >> 4; // 8..12 v = v1 >> n; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 20; n += m4 >> 4; // 16..24 v = __funnelshift_r(v1, v2, n); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 24; n += m4 >> 4; // 24..36 v >>= (m4 >> 4); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 28; return m; } /* * @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length * minus 2, given two input masks each containing bit0 or bit1 of the corresponding * code length minus 2 for up to 32 bytes */ inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1) { uint32_t m; m = (v1 & 1) * 2 + (v0 & 1); v0 
>>= (m + 2); v1 >>= (m + 1); for (uint32_t i = 1; i < 16; i++) { uint32_t m2 = (v1 & 2) | (v0 & 1); uint32_t n = m2 + 2; m |= m2 << (i * 2); v0 >>= n; v1 >>= n; } return m; } #define READ_BYTE(pos) s->q.buf[(pos) & (PREFETCH_SIZE-1)] __device__ void snappy_decode_symbols(unsnap_state_s *s, uint32_t t) { uint32_t cur = 0; uint32_t end = static_cast<uint32_t>(s->end - s->base); uint32_t bytes_left = s->uncompressed_size; uint32_t dst_pos = 0; int32_t batch = 0; for (;;) { int32_t batch_len; volatile unsnap_batch_s *b; // Wait for prefetcher if (t == 0) { s->q.prefetch_rdpos = cur; #pragma unroll(1) // We don't want unrolling here while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); } b = &s->q.batch[batch * BATCH_SIZE]; } // Process small symbols in parallel: for data that does not get good compression, // the stream will consist of a large number of short literals (1-byte or 2-byte) // followed by short repeat runs. This results in many 2-byte or 3-byte symbols // that can all be decoded in parallel once we know the symbol length. { uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask; uint32_t b0; cur = SHFL0(cur); cur_t = cur + t; b0 = READ_BYTE(cur_t); v0 = BALLOT((b0 == 4) || (b0 & 2)); b0 = READ_BYTE(cur_t + 32); v1 = BALLOT((b0 == 4) || (b0 & 2)); b0 = READ_BYTE(cur_t + 64); v2 = BALLOT((b0 == 4) || (b0 & 2)); len3_mask = SHFL0((t == 0) ? get_len3_mask(v0, v1, v2) : 0); cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1)); b0 = READ_BYTE(cur_t); is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0); short_sym_mask = BALLOT(is_long_sym); batch_len = 0; b = reinterpret_cast<volatile unsnap_batch_s *>(SHFL0(reinterpret_cast<uintptr_t>(b))); if (!(short_sym_mask & 1)) { batch_len = SHFL0((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0); if (batch_len != 0) { uint32_t blen = 0; int32_t ofs = 0; if (t < batch_len) { blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1); ofs = (b0 & 1) ? 
((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1) : (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8) : -(int32_t)(cur_t + 1); b[t].len = blen; b[t].offset = ofs; ofs += blen; // for correct out-of-range detection below } blen = WarpReducePos32(blen, t); bytes_left = SHFL0(bytes_left); dst_pos = SHFL0(dst_pos); short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen))); if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); } if (batch_len != 0) { blen = SHFL(blen, batch_len - 1); cur = SHFL(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1); if (t == 0) { dst_pos += blen; bytes_left -= blen; } } } } // Check if the batch was stopped by a 3-byte or 4-byte literal if (batch_len < BATCH_SIZE-2 && SHFL(b0 & ~4, batch_len) == 8) { // If so, run a slower version of the above that can also handle 3/4-byte literal sequences uint32_t batch_add; do { uint32_t clen, mask_t; cur_t = cur + t; b0 = READ_BYTE(cur_t); clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2 v0 = BALLOT(clen & 1); v1 = BALLOT((clen >> 1) & 1); len3_mask = SHFL0((t == 0) ? get_len5_mask(v0, v1) : 0); mask_t = (1 << (2 * t)) - 1; cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaaaaaa) & mask_t) + __popc((len3_mask & 0x55555555) & mask_t); b0 = READ_BYTE(cur_t); is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3*4)) || (cur_t >= cur + 32) || (batch_len + t >= BATCH_SIZE); batch_add = __ffs(BALLOT(is_long_sym)) - 1; if (batch_add != 0) { uint32_t blen = 0; int32_t ofs = 0; if (t < batch_add) { blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1); ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1) : (b0 & 2) ? 
READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8) : -(int32_t)(cur_t + 1); b[batch_len + t].len = blen; b[batch_len + t].offset = ofs; ofs += blen; // for correct out-of-range detection below } blen = WarpReducePos32(blen, t); bytes_left = SHFL0(bytes_left); dst_pos = SHFL0(dst_pos); short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen))); if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); } if (batch_add != 0) { blen = SHFL(blen, batch_add - 1); cur = SHFL(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add-1) * 2)) & 3); if (t == 0) { dst_pos += blen; bytes_left -= blen; } batch_len += batch_add; } } } while (batch_add >= 6 && batch_len < BATCH_SIZE-2); } } if (t == 0) { while (bytes_left > 0 && batch_len < BATCH_SIZE) { uint32_t blen, offset; uint8_t b0 = READ_BYTE(cur); if (b0 & 3) { uint8_t b1 = READ_BYTE(cur+1); if (!(b0 & 2)) { // xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset offset = ((b0 & 0xe0) << 3) | b1; blen = ((b0 >> 2) & 7) + 4; cur += 2; } else { // xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset offset = b1 | (READ_BYTE(cur+2) << 8); if (b0 & 1) // 4-byte offset { offset |= (READ_BYTE(cur+3) << 16) | (READ_BYTE(cur + 4) << 24); cur += 5; } else { cur += 3; } blen = (b0 >> 2) + 1; } dst_pos += blen; if (offset - 1u >= dst_pos || bytes_left < blen) break; bytes_left -= blen; } else if (b0 < 4*4) { // 0000xx00: short literal blen = (b0 >> 2) + 1; offset = -(int32_t)(cur + 1); cur += 1 + blen; dst_pos += blen; if (bytes_left < blen) break; bytes_left -= blen; } else { // xxxxxx00: literal blen = b0 >> 2; if (blen >= 60) { uint32_t num_bytes = blen - 59; blen = READ_BYTE(cur + 1); if (num_bytes > 1) { blen |= READ_BYTE(cur + 2) << 8; if (num_bytes > 2) { blen |= READ_BYTE(cur + 3) << 16; if (num_bytes > 3) { blen |= READ_BYTE(cur + 4) << 24; } } } cur += num_bytes; } cur += 1; blen += 1; offset = -(int32_t)cur; cur += blen; // Wait for prefetcher s->q.prefetch_rdpos = cur; 
#pragma unroll(1) // We don't want unrolling here while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); } dst_pos += blen; if (bytes_left < blen) break; bytes_left -= blen; } b[batch_len].len = blen; b[batch_len].offset = offset; batch_len++; } if (batch_len != 0) { s->q.batch_len[batch] = batch_len; batch = (batch + 1) & (BATCH_COUNT - 1); } } batch_len = SHFL0(batch_len); if (t == 0) { while (s->q.batch_len[batch] != 0) { NANOSLEEP(100); } } if (batch_len != BATCH_SIZE) { break; } } if (!t) { s->q.prefetch_end = 1; s->q.batch_len[batch] = -1; s->bytes_left = bytes_left; if (bytes_left != 0) { s->error = -2; } } } // WARP1: process symbols and output uncompressed stream // NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that would result in out-of-bounds accesses) __device__ void snappy_process_symbols(unsnap_state_s *s, int t) { const uint8_t *literal_base = s->base; uint8_t *out = reinterpret_cast<uint8_t *>(s->in.dstDevice); int batch = 0; do { volatile unsnap_batch_s *b = &s->q.batch[batch * BATCH_SIZE]; int32_t batch_len, blen_t, dist_t; if (t == 0) { while ((batch_len = s->q.batch_len[batch]) == 0) { NANOSLEEP(100); } } else { batch_len = 0; } batch_len = SHFL0(batch_len); if (batch_len <= 0) { break; } if (t < batch_len) { blen_t = b[t].len; dist_t = b[t].offset; } else { blen_t = dist_t = 0; } // Try to combine as many small entries as possible, but try to avoid doing that // if we see a small repeat distance 8 bytes or less if (SHFL0(min((uint32_t)dist_t, (uint32_t)SHFL_XOR(dist_t, 1))) > 8) { uint32_t n; do { uint32_t bofs = WarpReducePos32(blen_t, t); uint32_t stop_mask = BALLOT((uint32_t)dist_t < bofs); uint32_t start_mask = WarpReduceSum32((bofs < 32 && t < batch_len) ? 
1 << bofs : 0); n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)), (uint32_t)batch_len); if (n != 0) { uint32_t it = __popc(start_mask & ((2 << t) - 1)); uint32_t tr = t - SHFL(bofs - blen_t, it); int32_t dist = SHFL(dist_t, it); if (it < n) { const uint8_t *src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist); out[t] = *src; } out += SHFL(bofs, n - 1); blen_t = SHFL(blen_t, (n + t) & 0x1f); dist_t = SHFL(dist_t, (n + t) & 0x1f); batch_len -= n; } } while (n >= 4); } for (int i = 0; i < batch_len; i++) { int32_t blen = SHFL(blen_t, i); int32_t dist = SHFL(dist_t, i); int32_t blen2 = (i + 1 < batch_len) ? SHFL(blen_t, i + 1) : 32; // Try to combine consecutive small entries if they are independent if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) { int32_t dist2 = SHFL(dist_t, i+1); if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) { int32_t d; if (t < blen) { d = dist; } else { dist = dist2; d = (dist2 <= 0) ? dist2 + blen : dist2; } blen += blen2; if (t < blen) { const uint8_t *src = (dist > 0) ? (out - d) : (literal_base - d); out[t] = src[t]; } out += blen; i++; continue; } } if (dist > 0) { // Copy uint8_t b0, b1; if (t < blen) { uint32_t pos = t; const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist; b0 = *src; } if (32 + t < blen) { uint32_t pos = 32 + t; const uint8_t *src = out + ((pos >= dist) ? 
(pos % dist) : pos) - dist; b1 = *src; } if (t < blen) { out[t] = b0; } if (32 + t < blen) { out[32 + t] = b1; } } else { // Literal uint8_t b0, b1; dist = -dist; while (blen >= 64) { b0 = literal_base[dist + t]; b1 = literal_base[dist + 32 + t]; out[t] = b0; out[32 + t] = b1; dist += 64; out += 64; blen -= 64; } if (t < blen) { b0 = literal_base[dist + t]; } if (32 + t < blen) { b1 = literal_base[dist + 32 + t]; } if (t < blen) { out[t] = b0; } if (32 + t < blen) { out[32 + t] = b1; } } out += blen; } SYNCWARP(); if (t == 0) { s->q.batch_len[batch] = 0; } batch = (batch + 1) & (BATCH_COUNT - 1); } while (1); } // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) unsnap_kernel(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs) { __shared__ __align__(16) unsnap_state_s state_g; int t = threadIdx.x; unsnap_state_s *s = &state_g; int strm_id = blockIdx.x; if (t < sizeof(gpu_inflate_input_s) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->in)[t] = reinterpret_cast<const uint32_t *>(&inputs[strm_id])[t]; __threadfence_block(); } if (t < BATCH_COUNT) { s->q.batch_len[t] = 0; } __syncthreads(); if (!t) { const uint8_t *cur = reinterpret_cast<const uint8_t *>(s->in.srcDevice); const uint8_t *end = cur + s->in.srcSize; s->error = 0; #if LOG_CYCLECOUNT s->tstart = clock(); #endif if (cur < end) { // Read uncompressed size (varint), limited to 32-bit uint32_t uncompressed_size = *cur++; if (uncompressed_size > 0x7f) { uint32_t c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & 0x7f) | (c << 7); if (uncompressed_size >= (0x80 << 7)) { c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14); if (uncompressed_size >= (0x80 << 14)) { c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21); if (uncompressed_size >= (0x80 << 21)) { c = (cur < end) ? 
*cur++ : 0; if (c < 0x8) uncompressed_size = (uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 28); else s->error = -1; } } } } s->uncompressed_size = uncompressed_size; s->bytes_left = uncompressed_size; s->base = cur; s->end = end; if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->in.dstSize)) { s->error = -1; } } else { s->error = -1; } s->q.prefetch_end = 0; s->q.prefetch_wrpos = 0; s->q.prefetch_rdpos = 0; } __syncthreads(); if (!s->error) { if (t < 32) { // WARP0: decode lengths and offsets snappy_decode_symbols(s, t); } else if (t < 64) { // WARP1: prefetch byte stream for WARP0 snappy_prefetch_bytestream(s, t & 0x1f); } else if (t < 96) { // WARP2: LZ77 snappy_process_symbols(s, t & 0x1f); } __syncthreads(); } if (!t) { outputs[strm_id].bytes_written = s->uncompressed_size - s->bytes_left; outputs[strm_id].status = s->error; #if LOG_CYCLECOUNT outputs[strm_id].reserved = clock() - s->tstart; #else outputs[strm_id].reserved = 0; #endif } } hipError_t __host__ gpu_unsnap(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs, int count, hipStream_t stream) { uint32_t count32 = (count > 0) ? count : 0; dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block dim3 dim_grid(count32, 1); // TODO: Check max grid dimensions vs max expected count hipLaunchKernelGGL(( unsnap_kernel) , dim3(dim_grid), dim3(dim_block), 0, stream , inputs, outputs); return hipSuccess; } } // namespace io } // namespace cudf
23ce3bab90776b90a93e50db858978e1038067da.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gpuinflate.h" #include <io/utilities/block_utils.cuh> namespace cudf { namespace io { // Not supporting streams longer than this (not what snappy is intended for) #define SNAPPY_MAX_STREAM_SIZE 0x7fffffff #define LOG2_BATCH_SIZE 5 #define BATCH_SIZE (1 << LOG2_BATCH_SIZE) #define LOG2_BATCH_COUNT 2 #define BATCH_COUNT (1 << LOG2_BATCH_COUNT) #define LOG2_PREFETCH_SIZE 9 #define PREFETCH_SIZE (1 << LOG2_PREFETCH_SIZE) // 512B, in 32B chunks #define LOG_CYCLECOUNT 0 struct unsnap_batch_s { int32_t len; // 1..64 = Number of bytes uint32_t offset; // copy distance if greater than zero or negative of literal offset in byte stream }; struct unsnap_queue_s { uint32_t prefetch_wrpos; uint32_t prefetch_rdpos; int32_t prefetch_end; int32_t batch_len[BATCH_COUNT]; // Length of each batch - <0:end, 0:not ready, >0:symbol count unsnap_batch_s batch[BATCH_COUNT * BATCH_SIZE]; uint8_t buf[PREFETCH_SIZE]; // Prefetch buffer }; struct unsnap_state_s { const uint8_t *base; const uint8_t *end; uint32_t uncompressed_size; uint32_t bytes_left; int32_t error; uint32_t tstart; volatile unsnap_queue_s q; gpu_inflate_input_s in; }; __device__ void snappy_prefetch_bytestream(unsnap_state_s *s, int t) { const uint8_t *base = s->base; uint32_t end = (uint32_t)(s->end - base); uint32_t align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base))); int32_t pos = min(align_bytes, end); int32_t 
blen; // Start by prefetching up to the next a 32B-aligned location if (t < pos) { s->q.buf[t] = base[t]; } blen = 0; do { SYNCWARP(); if (!t) { uint32_t minrdpos; s->q.prefetch_wrpos = pos; minrdpos = pos - min(pos, PREFETCH_SIZE - 32u); blen = (int)min(32u, end - pos); for (;;) { uint32_t rdpos = s->q.prefetch_rdpos; if (rdpos >= minrdpos) break; if (s->q.prefetch_end) { blen = 0; break; } NANOSLEEP(100); } } blen = SHFL0(blen); if (t < blen) { s->q.buf[(pos + t) & (PREFETCH_SIZE - 1)] = base[pos + t]; } pos += blen; } while (blen > 0); } /* * @brief Lookup table for get_len3_mask() * * Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of * 3-byte code lengths in the lower 4 bits, along with the total number of * bytes used for coding the four lengths in the upper 4 bits. * The upper 4-bit value could also be obtained by 8+__popc(mask4) * * for (uint32_t k = 0; k < 1024; k++) * { * for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++) * { * v |= (b & 1) << i; * n += (b & 1) + 2; * b >>= (b & 1) + 2; * } * k_len3lut[k] = v | (n << 4); * } * */ static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = { 0x80,0x91,0x80,0x91,0x92,0x91,0x92,0x91,0x80,0xa3,0x80,0xa3,0x92,0xa3,0x92,0xa3, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xa3,0x94,0xa3,0x92,0xa3,0x92,0xa3, 0x80,0xa5,0x80,0xa5,0xa6,0xa5,0xa6,0xa5,0x80,0xa3,0x80,0xa3,0xa6,0xa3,0xa6,0xa3, 0x94,0xa5,0x94,0xa5,0xa6,0xa5,0xa6,0xa5,0x94,0xa3,0x94,0xa3,0xa6,0xa3,0xa6,0xa3, 0x98,0x91,0x98,0x91,0x92,0x91,0x92,0x91,0x98,0xb7,0x98,0xb7,0x92,0xb7,0x92,0xb7, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xb7,0x94,0xb7,0x92,0xb7,0x92,0xb7, 0x98,0xa5,0x98,0xa5,0xa6,0xa5,0xa6,0xa5,0x98,0xb7,0x98,0xb7,0xa6,0xb7,0xa6,0xb7, 0x94,0xa5,0x94,0xa5,0xa6,0xa5,0xa6,0xa5,0x94,0xb7,0x94,0xb7,0xa6,0xb7,0xa6,0xb7, 0x80,0xa9,0x80,0xa9,0xaa,0xa9,0xaa,0xa9,0x80,0xa3,0x80,0xa3,0xaa,0xa3,0xaa,0xa3, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xa3,0xac,0xa3,0xaa,0xa3,0xaa,0xa3, 
0x80,0xa5,0x80,0xa5,0xa6,0xa5,0xa6,0xa5,0x80,0xa3,0x80,0xa3,0xa6,0xa3,0xa6,0xa3, 0xac,0xa5,0xac,0xa5,0xa6,0xa5,0xa6,0xa5,0xac,0xa3,0xac,0xa3,0xa6,0xa3,0xa6,0xa3, 0x98,0xa9,0x98,0xa9,0xaa,0xa9,0xaa,0xa9,0x98,0xb7,0x98,0xb7,0xaa,0xb7,0xaa,0xb7, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xb7,0xac,0xb7,0xaa,0xb7,0xaa,0xb7, 0x98,0xa5,0x98,0xa5,0xa6,0xa5,0xa6,0xa5,0x98,0xb7,0x98,0xb7,0xa6,0xb7,0xa6,0xb7, 0xac,0xa5,0xac,0xa5,0xa6,0xa5,0xa6,0xa5,0xac,0xb7,0xac,0xb7,0xa6,0xb7,0xa6,0xb7, 0x80,0x91,0x80,0x91,0x92,0x91,0x92,0x91,0x80,0xbb,0x80,0xbb,0x92,0xbb,0x92,0xbb, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xbb,0x94,0xbb,0x92,0xbb,0x92,0xbb, 0x80,0xbd,0x80,0xbd,0xbe,0xbd,0xbe,0xbd,0x80,0xbb,0x80,0xbb,0xbe,0xbb,0xbe,0xbb, 0x94,0xbd,0x94,0xbd,0xbe,0xbd,0xbe,0xbd,0x94,0xbb,0x94,0xbb,0xbe,0xbb,0xbe,0xbb, 0x98,0x91,0x98,0x91,0x92,0x91,0x92,0x91,0x98,0xb7,0x98,0xb7,0x92,0xb7,0x92,0xb7, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xb7,0x94,0xb7,0x92,0xb7,0x92,0xb7, 0x98,0xbd,0x98,0xbd,0xbe,0xbd,0xbe,0xbd,0x98,0xb7,0x98,0xb7,0xbe,0xb7,0xbe,0xb7, 0x94,0xbd,0x94,0xbd,0xbe,0xbd,0xbe,0xbd,0x94,0xb7,0x94,0xb7,0xbe,0xb7,0xbe,0xb7, 0x80,0xa9,0x80,0xa9,0xaa,0xa9,0xaa,0xa9,0x80,0xbb,0x80,0xbb,0xaa,0xbb,0xaa,0xbb, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xbb,0xac,0xbb,0xaa,0xbb,0xaa,0xbb, 0x80,0xbd,0x80,0xbd,0xbe,0xbd,0xbe,0xbd,0x80,0xbb,0x80,0xbb,0xbe,0xbb,0xbe,0xbb, 0xac,0xbd,0xac,0xbd,0xbe,0xbd,0xbe,0xbd,0xac,0xbb,0xac,0xbb,0xbe,0xbb,0xbe,0xbb, 0x98,0xa9,0x98,0xa9,0xaa,0xa9,0xaa,0xa9,0x98,0xb7,0x98,0xb7,0xaa,0xb7,0xaa,0xb7, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xb7,0xac,0xb7,0xaa,0xb7,0xaa,0xb7, 0x98,0xbd,0x98,0xbd,0xbe,0xbd,0xbe,0xbd,0x98,0xb7,0x98,0xb7,0xbe,0xb7,0xbe,0xb7, 0xac,0xbd,0xac,0xbd,0xbe,0xbd,0xbe,0xbd,0xac,0xb7,0xac,0xb7,0xbe,0xb7,0xbe,0xb7, 0x80,0x91,0x80,0x91,0x92,0x91,0x92,0x91,0x80,0xa3,0x80,0xa3,0x92,0xa3,0x92,0xa3, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xa3,0x94,0xa3,0x92,0xa3,0x92,0xa3, 
0x80,0xa5,0x80,0xa5,0xa6,0xa5,0xa6,0xa5,0x80,0xa3,0x80,0xa3,0xa6,0xa3,0xa6,0xa3, 0x94,0xa5,0x94,0xa5,0xa6,0xa5,0xa6,0xa5,0x94,0xa3,0x94,0xa3,0xa6,0xa3,0xa6,0xa3, 0x98,0x91,0x98,0x91,0x92,0x91,0x92,0x91,0x98,0xcf,0x98,0xcf,0x92,0xcf,0x92,0xcf, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xcf,0x94,0xcf,0x92,0xcf,0x92,0xcf, 0x98,0xa5,0x98,0xa5,0xa6,0xa5,0xa6,0xa5,0x98,0xcf,0x98,0xcf,0xa6,0xcf,0xa6,0xcf, 0x94,0xa5,0x94,0xa5,0xa6,0xa5,0xa6,0xa5,0x94,0xcf,0x94,0xcf,0xa6,0xcf,0xa6,0xcf, 0x80,0xa9,0x80,0xa9,0xaa,0xa9,0xaa,0xa9,0x80,0xa3,0x80,0xa3,0xaa,0xa3,0xaa,0xa3, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xa3,0xac,0xa3,0xaa,0xa3,0xaa,0xa3, 0x80,0xa5,0x80,0xa5,0xa6,0xa5,0xa6,0xa5,0x80,0xa3,0x80,0xa3,0xa6,0xa3,0xa6,0xa3, 0xac,0xa5,0xac,0xa5,0xa6,0xa5,0xa6,0xa5,0xac,0xa3,0xac,0xa3,0xa6,0xa3,0xa6,0xa3, 0x98,0xa9,0x98,0xa9,0xaa,0xa9,0xaa,0xa9,0x98,0xcf,0x98,0xcf,0xaa,0xcf,0xaa,0xcf, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xcf,0xac,0xcf,0xaa,0xcf,0xaa,0xcf, 0x98,0xa5,0x98,0xa5,0xa6,0xa5,0xa6,0xa5,0x98,0xcf,0x98,0xcf,0xa6,0xcf,0xa6,0xcf, 0xac,0xa5,0xac,0xa5,0xa6,0xa5,0xa6,0xa5,0xac,0xcf,0xac,0xcf,0xa6,0xcf,0xa6,0xcf, 0x80,0x91,0x80,0x91,0x92,0x91,0x92,0x91,0x80,0xbb,0x80,0xbb,0x92,0xbb,0x92,0xbb, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xbb,0x94,0xbb,0x92,0xbb,0x92,0xbb, 0x80,0xbd,0x80,0xbd,0xbe,0xbd,0xbe,0xbd,0x80,0xbb,0x80,0xbb,0xbe,0xbb,0xbe,0xbb, 0x94,0xbd,0x94,0xbd,0xbe,0xbd,0xbe,0xbd,0x94,0xbb,0x94,0xbb,0xbe,0xbb,0xbe,0xbb, 0x98,0x91,0x98,0x91,0x92,0x91,0x92,0x91,0x98,0xcf,0x98,0xcf,0x92,0xcf,0x92,0xcf, 0x94,0x91,0x94,0x91,0x92,0x91,0x92,0x91,0x94,0xcf,0x94,0xcf,0x92,0xcf,0x92,0xcf, 0x98,0xbd,0x98,0xbd,0xbe,0xbd,0xbe,0xbd,0x98,0xcf,0x98,0xcf,0xbe,0xcf,0xbe,0xcf, 0x94,0xbd,0x94,0xbd,0xbe,0xbd,0xbe,0xbd,0x94,0xcf,0x94,0xcf,0xbe,0xcf,0xbe,0xcf, 0x80,0xa9,0x80,0xa9,0xaa,0xa9,0xaa,0xa9,0x80,0xbb,0x80,0xbb,0xaa,0xbb,0xaa,0xbb, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xbb,0xac,0xbb,0xaa,0xbb,0xaa,0xbb, 
0x80,0xbd,0x80,0xbd,0xbe,0xbd,0xbe,0xbd,0x80,0xbb,0x80,0xbb,0xbe,0xbb,0xbe,0xbb, 0xac,0xbd,0xac,0xbd,0xbe,0xbd,0xbe,0xbd,0xac,0xbb,0xac,0xbb,0xbe,0xbb,0xbe,0xbb, 0x98,0xa9,0x98,0xa9,0xaa,0xa9,0xaa,0xa9,0x98,0xcf,0x98,0xcf,0xaa,0xcf,0xaa,0xcf, 0xac,0xa9,0xac,0xa9,0xaa,0xa9,0xaa,0xa9,0xac,0xcf,0xac,0xcf,0xaa,0xcf,0xaa,0xcf, 0x98,0xbd,0x98,0xbd,0xbe,0xbd,0xbe,0xbd,0x98,0xcf,0x98,0xcf,0xbe,0xcf,0xbe,0xcf, 0xac,0xbd,0xac,0xbd,0xbe,0xbd,0xbe,0xbd,0xac,0xcf,0xac,0xcf,0xbe,0xcf,0xbe,0xcf }; /* * @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte * code length, given an input mask of up to 96 bits. * * Implemented by doing 8 consecutive lookups, building the result 4-bit at a time */ inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2) { uint32_t m, v, m4, n; v = v0; m4 = k_len3lut[v & 0x3ff]; m = m4 & 0xf; n = m4 >> 4; // 8..12 v = v0 >> n; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 4; n += m4 >> 4; // 16..24 v = __funnelshift_r(v0, v1, n); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 8; n += m4 >> 4; // 24..36 v >>= (m4 >> 4); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 12; n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16 v1 = __funnelshift_r(v1, v2, n); v2 >>= n; v = v1; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 16; n = m4 >> 4; // 8..12 v = v1 >> n; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 20; n += m4 >> 4; // 16..24 v = __funnelshift_r(v1, v2, n); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 24; n += m4 >> 4; // 24..36 v >>= (m4 >> 4); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 28; return m; } /* * @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length * minus 2, given two input masks each containing bit0 or bit1 of the corresponding * code length minus 2 for up to 32 bytes */ inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1) { uint32_t m; m = (v1 & 1) * 2 + (v0 & 1); v0 >>= (m + 2); v1 >>= (m + 1); for (uint32_t i = 1; i < 16; i++) { uint32_t m2 = 
(v1 & 2) | (v0 & 1); uint32_t n = m2 + 2; m |= m2 << (i * 2); v0 >>= n; v1 >>= n; } return m; } #define READ_BYTE(pos) s->q.buf[(pos) & (PREFETCH_SIZE-1)] __device__ void snappy_decode_symbols(unsnap_state_s *s, uint32_t t) { uint32_t cur = 0; uint32_t end = static_cast<uint32_t>(s->end - s->base); uint32_t bytes_left = s->uncompressed_size; uint32_t dst_pos = 0; int32_t batch = 0; for (;;) { int32_t batch_len; volatile unsnap_batch_s *b; // Wait for prefetcher if (t == 0) { s->q.prefetch_rdpos = cur; #pragma unroll(1) // We don't want unrolling here while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); } b = &s->q.batch[batch * BATCH_SIZE]; } // Process small symbols in parallel: for data that does not get good compression, // the stream will consist of a large number of short literals (1-byte or 2-byte) // followed by short repeat runs. This results in many 2-byte or 3-byte symbols // that can all be decoded in parallel once we know the symbol length. { uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask; uint32_t b0; cur = SHFL0(cur); cur_t = cur + t; b0 = READ_BYTE(cur_t); v0 = BALLOT((b0 == 4) || (b0 & 2)); b0 = READ_BYTE(cur_t + 32); v1 = BALLOT((b0 == 4) || (b0 & 2)); b0 = READ_BYTE(cur_t + 64); v2 = BALLOT((b0 == 4) || (b0 & 2)); len3_mask = SHFL0((t == 0) ? get_len3_mask(v0, v1, v2) : 0); cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1)); b0 = READ_BYTE(cur_t); is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0); short_sym_mask = BALLOT(is_long_sym); batch_len = 0; b = reinterpret_cast<volatile unsnap_batch_s *>(SHFL0(reinterpret_cast<uintptr_t>(b))); if (!(short_sym_mask & 1)) { batch_len = SHFL0((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0); if (batch_len != 0) { uint32_t blen = 0; int32_t ofs = 0; if (t < batch_len) { blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1); ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1) : (b0 & 2) ? 
READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8) : -(int32_t)(cur_t + 1); b[t].len = blen; b[t].offset = ofs; ofs += blen; // for correct out-of-range detection below } blen = WarpReducePos32(blen, t); bytes_left = SHFL0(bytes_left); dst_pos = SHFL0(dst_pos); short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen))); if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); } if (batch_len != 0) { blen = SHFL(blen, batch_len - 1); cur = SHFL(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1); if (t == 0) { dst_pos += blen; bytes_left -= blen; } } } } // Check if the batch was stopped by a 3-byte or 4-byte literal if (batch_len < BATCH_SIZE-2 && SHFL(b0 & ~4, batch_len) == 8) { // If so, run a slower version of the above that can also handle 3/4-byte literal sequences uint32_t batch_add; do { uint32_t clen, mask_t; cur_t = cur + t; b0 = READ_BYTE(cur_t); clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2 v0 = BALLOT(clen & 1); v1 = BALLOT((clen >> 1) & 1); len3_mask = SHFL0((t == 0) ? get_len5_mask(v0, v1) : 0); mask_t = (1 << (2 * t)) - 1; cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaaaaaa) & mask_t) + __popc((len3_mask & 0x55555555) & mask_t); b0 = READ_BYTE(cur_t); is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3*4)) || (cur_t >= cur + 32) || (batch_len + t >= BATCH_SIZE); batch_add = __ffs(BALLOT(is_long_sym)) - 1; if (batch_add != 0) { uint32_t blen = 0; int32_t ofs = 0; if (t < batch_add) { blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1); ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1) : (b0 & 2) ? 
READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8) : -(int32_t)(cur_t + 1); b[batch_len + t].len = blen; b[batch_len + t].offset = ofs; ofs += blen; // for correct out-of-range detection below } blen = WarpReducePos32(blen, t); bytes_left = SHFL0(bytes_left); dst_pos = SHFL0(dst_pos); short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen))); if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); } if (batch_add != 0) { blen = SHFL(blen, batch_add - 1); cur = SHFL(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add-1) * 2)) & 3); if (t == 0) { dst_pos += blen; bytes_left -= blen; } batch_len += batch_add; } } } while (batch_add >= 6 && batch_len < BATCH_SIZE-2); } } if (t == 0) { while (bytes_left > 0 && batch_len < BATCH_SIZE) { uint32_t blen, offset; uint8_t b0 = READ_BYTE(cur); if (b0 & 3) { uint8_t b1 = READ_BYTE(cur+1); if (!(b0 & 2)) { // xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset offset = ((b0 & 0xe0) << 3) | b1; blen = ((b0 >> 2) & 7) + 4; cur += 2; } else { // xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset offset = b1 | (READ_BYTE(cur+2) << 8); if (b0 & 1) // 4-byte offset { offset |= (READ_BYTE(cur+3) << 16) | (READ_BYTE(cur + 4) << 24); cur += 5; } else { cur += 3; } blen = (b0 >> 2) + 1; } dst_pos += blen; if (offset - 1u >= dst_pos || bytes_left < blen) break; bytes_left -= blen; } else if (b0 < 4*4) { // 0000xx00: short literal blen = (b0 >> 2) + 1; offset = -(int32_t)(cur + 1); cur += 1 + blen; dst_pos += blen; if (bytes_left < blen) break; bytes_left -= blen; } else { // xxxxxx00: literal blen = b0 >> 2; if (blen >= 60) { uint32_t num_bytes = blen - 59; blen = READ_BYTE(cur + 1); if (num_bytes > 1) { blen |= READ_BYTE(cur + 2) << 8; if (num_bytes > 2) { blen |= READ_BYTE(cur + 3) << 16; if (num_bytes > 3) { blen |= READ_BYTE(cur + 4) << 24; } } } cur += num_bytes; } cur += 1; blen += 1; offset = -(int32_t)cur; cur += blen; // Wait for prefetcher s->q.prefetch_rdpos = cur; 
#pragma unroll(1) // We don't want unrolling here while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); } dst_pos += blen; if (bytes_left < blen) break; bytes_left -= blen; } b[batch_len].len = blen; b[batch_len].offset = offset; batch_len++; } if (batch_len != 0) { s->q.batch_len[batch] = batch_len; batch = (batch + 1) & (BATCH_COUNT - 1); } } batch_len = SHFL0(batch_len); if (t == 0) { while (s->q.batch_len[batch] != 0) { NANOSLEEP(100); } } if (batch_len != BATCH_SIZE) { break; } } if (!t) { s->q.prefetch_end = 1; s->q.batch_len[batch] = -1; s->bytes_left = bytes_left; if (bytes_left != 0) { s->error = -2; } } } // WARP1: process symbols and output uncompressed stream // NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that would result in out-of-bounds accesses) __device__ void snappy_process_symbols(unsnap_state_s *s, int t) { const uint8_t *literal_base = s->base; uint8_t *out = reinterpret_cast<uint8_t *>(s->in.dstDevice); int batch = 0; do { volatile unsnap_batch_s *b = &s->q.batch[batch * BATCH_SIZE]; int32_t batch_len, blen_t, dist_t; if (t == 0) { while ((batch_len = s->q.batch_len[batch]) == 0) { NANOSLEEP(100); } } else { batch_len = 0; } batch_len = SHFL0(batch_len); if (batch_len <= 0) { break; } if (t < batch_len) { blen_t = b[t].len; dist_t = b[t].offset; } else { blen_t = dist_t = 0; } // Try to combine as many small entries as possible, but try to avoid doing that // if we see a small repeat distance 8 bytes or less if (SHFL0(min((uint32_t)dist_t, (uint32_t)SHFL_XOR(dist_t, 1))) > 8) { uint32_t n; do { uint32_t bofs = WarpReducePos32(blen_t, t); uint32_t stop_mask = BALLOT((uint32_t)dist_t < bofs); uint32_t start_mask = WarpReduceSum32((bofs < 32 && t < batch_len) ? 
1 << bofs : 0); n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)), (uint32_t)batch_len); if (n != 0) { uint32_t it = __popc(start_mask & ((2 << t) - 1)); uint32_t tr = t - SHFL(bofs - blen_t, it); int32_t dist = SHFL(dist_t, it); if (it < n) { const uint8_t *src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist); out[t] = *src; } out += SHFL(bofs, n - 1); blen_t = SHFL(blen_t, (n + t) & 0x1f); dist_t = SHFL(dist_t, (n + t) & 0x1f); batch_len -= n; } } while (n >= 4); } for (int i = 0; i < batch_len; i++) { int32_t blen = SHFL(blen_t, i); int32_t dist = SHFL(dist_t, i); int32_t blen2 = (i + 1 < batch_len) ? SHFL(blen_t, i + 1) : 32; // Try to combine consecutive small entries if they are independent if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) { int32_t dist2 = SHFL(dist_t, i+1); if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) { int32_t d; if (t < blen) { d = dist; } else { dist = dist2; d = (dist2 <= 0) ? dist2 + blen : dist2; } blen += blen2; if (t < blen) { const uint8_t *src = (dist > 0) ? (out - d) : (literal_base - d); out[t] = src[t]; } out += blen; i++; continue; } } if (dist > 0) { // Copy uint8_t b0, b1; if (t < blen) { uint32_t pos = t; const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist; b0 = *src; } if (32 + t < blen) { uint32_t pos = 32 + t; const uint8_t *src = out + ((pos >= dist) ? 
(pos % dist) : pos) - dist; b1 = *src; } if (t < blen) { out[t] = b0; } if (32 + t < blen) { out[32 + t] = b1; } } else { // Literal uint8_t b0, b1; dist = -dist; while (blen >= 64) { b0 = literal_base[dist + t]; b1 = literal_base[dist + 32 + t]; out[t] = b0; out[32 + t] = b1; dist += 64; out += 64; blen -= 64; } if (t < blen) { b0 = literal_base[dist + t]; } if (32 + t < blen) { b1 = literal_base[dist + 32 + t]; } if (t < blen) { out[t] = b0; } if (32 + t < blen) { out[32 + t] = b1; } } out += blen; } SYNCWARP(); if (t == 0) { s->q.batch_len[batch] = 0; } batch = (batch + 1) & (BATCH_COUNT - 1); } while (1); } // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) unsnap_kernel(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs) { __shared__ __align__(16) unsnap_state_s state_g; int t = threadIdx.x; unsnap_state_s *s = &state_g; int strm_id = blockIdx.x; if (t < sizeof(gpu_inflate_input_s) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->in)[t] = reinterpret_cast<const uint32_t *>(&inputs[strm_id])[t]; __threadfence_block(); } if (t < BATCH_COUNT) { s->q.batch_len[t] = 0; } __syncthreads(); if (!t) { const uint8_t *cur = reinterpret_cast<const uint8_t *>(s->in.srcDevice); const uint8_t *end = cur + s->in.srcSize; s->error = 0; #if LOG_CYCLECOUNT s->tstart = clock(); #endif if (cur < end) { // Read uncompressed size (varint), limited to 32-bit uint32_t uncompressed_size = *cur++; if (uncompressed_size > 0x7f) { uint32_t c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & 0x7f) | (c << 7); if (uncompressed_size >= (0x80 << 7)) { c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14); if (uncompressed_size >= (0x80 << 14)) { c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21); if (uncompressed_size >= (0x80 << 21)) { c = (cur < end) ? 
*cur++ : 0; if (c < 0x8) uncompressed_size = (uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 28); else s->error = -1; } } } } s->uncompressed_size = uncompressed_size; s->bytes_left = uncompressed_size; s->base = cur; s->end = end; if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->in.dstSize)) { s->error = -1; } } else { s->error = -1; } s->q.prefetch_end = 0; s->q.prefetch_wrpos = 0; s->q.prefetch_rdpos = 0; } __syncthreads(); if (!s->error) { if (t < 32) { // WARP0: decode lengths and offsets snappy_decode_symbols(s, t); } else if (t < 64) { // WARP1: prefetch byte stream for WARP0 snappy_prefetch_bytestream(s, t & 0x1f); } else if (t < 96) { // WARP2: LZ77 snappy_process_symbols(s, t & 0x1f); } __syncthreads(); } if (!t) { outputs[strm_id].bytes_written = s->uncompressed_size - s->bytes_left; outputs[strm_id].status = s->error; #if LOG_CYCLECOUNT outputs[strm_id].reserved = clock() - s->tstart; #else outputs[strm_id].reserved = 0; #endif } } cudaError_t __host__ gpu_unsnap(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs, int count, cudaStream_t stream) { uint32_t count32 = (count > 0) ? count : 0; dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block dim3 dim_grid(count32, 1); // TODO: Check max grid dimensions vs max expected count unsnap_kernel <<< dim_grid, dim_block, 0, stream >>>(inputs, outputs); return cudaSuccess; } } // namespace io } // namespace cudf
c68cdbdb458a40784a4260936d5c0958b55abefc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* **************************************** * * CUDA Kernel: matrix minus * */ /* **************************************** * * sub2ind - Column-major indexing of 2D arrays * */ template <typename T> __device__ __forceinline__ T sub2ind( T i, T j, T height ) { return (i + height*j); } // end function 'sub2ind' /* **************************************** * * core kernel * */ __global__ void apply_filter(double * Iedit, const double * ILaplace, const double * B, const int * Mask, const int m, const int n, const int p){ /* thread indices */ const int j = blockIdx.y*blockDim.y+threadIdx.y; const int i = blockIdx.x*blockDim.x+threadIdx.x; /* matrix calculation */ if ((i >= m) || (j >= n*p) || (Mask[sub2ind(i,j,m)]==0) ){ return; } Iedit[sub2ind(i,j,m)] = (ILaplace[sub2ind(i,j,m)]-B[sub2ind(i,j,m)])/4.0; return ; }
c68cdbdb458a40784a4260936d5c0958b55abefc.cu
/* **************************************** * * CUDA Kernel: matrix minus * */ /* **************************************** * * sub2ind - Column-major indexing of 2D arrays * */ template <typename T> __device__ __forceinline__ T sub2ind( T i, T j, T height ) { return (i + height*j); } // end function 'sub2ind' /* **************************************** * * core kernel * */ __global__ void apply_filter(double * Iedit, const double * ILaplace, const double * B, const int * Mask, const int m, const int n, const int p){ /* thread indices */ const int j = blockIdx.y*blockDim.y+threadIdx.y; const int i = blockIdx.x*blockDim.x+threadIdx.x; /* matrix calculation */ if ((i >= m) || (j >= n*p) || (Mask[sub2ind(i,j,m)]==0) ){ return; } Iedit[sub2ind(i,j,m)] = (ILaplace[sub2ind(i,j,m)]-B[sub2ind(i,j,m)])/4.0; return ; }
cb3b55cb8056c439127b92de3c7b56366aaaae49.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <fstream> #include <iostream> #include <vector> #include "dir-enum.hh" #include "geometry3_d.hh" #include "plane.hh" #include "trop-enum.hh" #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> // #include <vector_functions.h> __device__ inline double3 operator*(const double d, const double3 d3) { const double xval = d * d3.x; const double yval = d * d3.y; const double zval = d * d3.z; return make_double3(xval, yval, zval); } __device__ inline double3 operator+(const double3 d3a, const double3 d3b) { const double xval = d3a.x * d3b.x; const double yval = d3a.y * d3b.y; const double zval = d3a.z * d3b.z; return make_double3(xval, yval, zval); } __device__ bool outofbounds(const coord3d_d& pos, int64_t nx, int64_t ny, int64_t nz) { if (pos.x[0] >= nx - 1 || pos.x[1] >= ny - 1 || pos.x[2] >= nz - 1 || pos.x[0] < 0 || pos.x[1] < 0 || pos.x[2] < 0) { return true; } return false; } // trilinear interpolation __device__ bool getvector_v1(const coord3d_d& pos, const hipPitchedPtr field_d, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d& res_vec) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) { // printf("x %f, y %f, z %f \n", pos[0], pos[1], pos[2]); // } char* ptr = (char*)field_d.ptr; // pitch being xdim * sizeof(thing) rounded up to a multiple of 32 size_t pitch = field_d.pitch; if (outofbounds(pos, nx, ny, nz)) return false; double x = pos[0]; double y = pos[1]; double z = pos[2]; int x0 = int(floor(pos[0])); int x1 = x0 + 1; int y0 = int(floor(pos[1])); int y1 = y0 + 1; int z0 = int(floor(pos[2])); int z1 = z0 + 1; coord3d_d v000 = *(coord3d_d*)(ptr + (pitch * ny * z0 + pitch * y0 + x0 * sizeof(coord3d_d))); coord3d_d v001 = *(coord3d_d*)(ptr + (pitch * ny * z1 + pitch * y0 + x0 * sizeof(coord3d_d))); coord3d_d v010 = *(coord3d_d*)(ptr + (pitch * ny * z0 + pitch * y1 + x0 * sizeof(coord3d_d))); coord3d_d v011 = *(coord3d_d*)(ptr + 
(pitch * ny * z1 + pitch * y1 + x0 * sizeof(coord3d_d))); coord3d_d v100 = *(coord3d_d*)(ptr + (pitch * ny * z0 + pitch * y0 + x1 * sizeof(coord3d_d))); coord3d_d v101 = *(coord3d_d*)(ptr + (pitch * ny * z1 + pitch * y0 + x1 * sizeof(coord3d_d))); coord3d_d v110 = *(coord3d_d*)(ptr + (pitch * ny * z0 + pitch * y1 + x1 * sizeof(coord3d_d))); coord3d_d v111 = *(coord3d_d*)(ptr + (pitch * ny * z1 + pitch * y1 + x1 * sizeof(coord3d_d))); coord3d_d aux0 = (x1 - x) * v000 + (x - x0) * v100; coord3d_d aux1 = (x1 - x) * v010 + (x - x0) * v110; coord3d_d aux2 = (x1 - x) * v001 + (x - x0) * v101; coord3d_d aux3 = (x1 - x) * v011 + (x - x0) * v111; coord3d_d aux4 = (y1 - y) * aux0 + (y - y0) * aux1; coord3d_d aux5 = (y1 - y) * aux2 + (y - y0) * aux3; res_vec = (z1 - z) * aux4 + (z - z0) * aux5; return true; } __device__ inline coord3d_d get_val_v2(const int x, const int y, const int z, hipTextureObject_t field_x, hipTextureObject_t field_y, hipTextureObject_t field_z) { int2 xvalint2 = tex3D<int2>(field_x, x, y, z); double xval = __hiloint2double(xvalint2.y, xvalint2.x); int2 yvalint2 = tex3D<int2>(field_y, x, y, z); double yval = __hiloint2double(yvalint2.y, yvalint2.x); int2 zvalint2 = tex3D<int2>(field_z, x, y, z); double zval = __hiloint2double(zvalint2.y, zvalint2.x); return coord3d_d(xval, yval, zval); } // trilinear interpolation __device__ bool getvector_v2(const coord3d_d& pos, hipTextureObject_t field_x, hipTextureObject_t field_y, hipTextureObject_t field_z, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d& res_vec) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) { // printf("x %f, y %f, z %f \n", pos[0], pos[1], pos[2]); // } if (outofbounds(pos, nx, ny, nz)) return false; double x = pos[0]; double y = pos[1]; double z = pos[2]; int x0 = int(floor(pos[0])); int x1 = x0 + 1; int y0 = int(floor(pos[1])); int y1 = y0 + 1; int z0 = int(floor(pos[2])); int z1 = z0 + 1; coord3d_d v000 = get_val_v2(x0, y0, z0, field_x, field_y, 
field_z); coord3d_d v001 = get_val_v2(x0, y0, z1, field_x, field_y, field_z); coord3d_d v010 = get_val_v2(x0, y1, z0, field_x, field_y, field_z); coord3d_d v011 = get_val_v2(x0, y1, z1, field_x, field_y, field_z); coord3d_d v100 = get_val_v2(x1, y0, z0, field_x, field_y, field_z); coord3d_d v101 = get_val_v2(x1, y0, z1, field_x, field_y, field_z); coord3d_d v110 = get_val_v2(x1, y1, z0, field_x, field_y, field_z); coord3d_d v111 = get_val_v2(x1, y1, z1, field_x, field_y, field_z); coord3d_d aux0 = (x1 - x) * v000 + (x - x0) * v100; coord3d_d aux1 = (x1 - x) * v010 + (x - x0) * v110; coord3d_d aux2 = (x1 - x) * v001 + (x - x0) * v101; coord3d_d aux3 = (x1 - x) * v011 + (x - x0) * v111; coord3d_d aux4 = (y1 - y) * aux0 + (y - y0) * aux1; coord3d_d aux5 = (y1 - y) * aux2 + (y - y0) * aux3; res_vec = (z1 - z) * aux4 + (z - z0) * aux5; return true; } // trilinear interpolation __device__ inline bool getvector_v3(const coord3d_d& pos, hipTextureObject_t field_d, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d& res_vec) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) { // printf("x %f, y %f, z %f \n", pos[0], pos[1], pos[2]); // } if (outofbounds(pos, nx, ny, nz)) return false; double x = pos[0]; double y = pos[1]; double z = pos[2]; float4 val = tex3D<float4>(field_d, x, y, z); res_vec = coord3d_d(val.x, val.y, val.z); return true; } // Runge-Kutta method, 4th order // c --> positions, k --> vectors at c __device__ bool extend_rungekutta_v1(const hipPitchedPtr field_d, const int64_t nx, const int64_t ny, const int64_t nz, const coord3d_d& prevpos, float step_length, coord3d_d& newpos) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; coord3d_d c0 = prevpos; coord3d_d k0; bool good = getvector_v1(c0, field_d, nx, ny, nz, k0); k0 = k0.normalised() * step_length; const coord3d_d c1 = c0 + k0 * 0.5; coord3d_d k1; good = getvector_v1(c1, field_d, nx, ny, nz, k1); if (!good) return false; k1 = k1.normalised() * step_length; const 
coord3d_d c2 = c0 + k1 * 0.5; coord3d_d k2; good = getvector_v1(c2, field_d, nx, ny, nz, k2); if (!good) return false; k2 = k2.normalised() * step_length; const coord3d_d c3 = c0 + k2; coord3d_d k3; good = getvector_v1(c3, field_d, nx, ny, nz, k3); if (!good) return false; k3 = k3.normalised() * step_length; const coord3d_d c4 = c0 + (k0 + k1 * 2.0 + k2 * 2.0 + k3) / 6.0; coord3d_d k4; good = getvector_v1(c4, field_d, nx, ny, nz, k4); if (!good) return false; newpos = c4; return true; } // Runge-Kutta method, 4th order // c --> positions, k --> vectors at c __device__ bool extend_rungekutta_v2(const hipTextureObject_t field_x, const hipTextureObject_t field_y, const hipTextureObject_t field_z, const int64_t nx, const int64_t ny, const int64_t nz, const coord3d_d& prevpos, float step_length, coord3d_d& newpos) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; coord3d_d c0 = prevpos; coord3d_d k0; bool good = getvector_v2(c0, field_x, field_y, field_z, nx, ny, nz, k0); k0 = k0.normalised() * step_length; const coord3d_d c1 = c0 + k0 * 0.5; coord3d_d k1; good = getvector_v2(c1, field_x, field_y, field_z, nx, ny, nz, k1); if (!good) return false; k1 = k1.normalised() * step_length; const coord3d_d c2 = c0 + k1 * 0.5; coord3d_d k2; good = getvector_v2(c2, field_x, field_y, field_z, nx, ny, nz, k2); if (!good) return false; k2 = k2.normalised() * step_length; const coord3d_d c3 = c0 + k2; coord3d_d k3; good = getvector_v2(c3, field_x, field_y, field_z, nx, ny, nz, k3); if (!good) return false; k3 = k3.normalised() * step_length; const coord3d_d c4 = c0 + (k0 + k1 * 2.0 + k2 * 2.0 + k3) / 6.0; coord3d_d k4; good = getvector_v2(c4, field_x, field_y, field_z, nx, ny, nz, k4); if (!good) return false; newpos = c4; return true; } // Runge-Kutta method, 4th order // c --> positions, k --> vectors at c __device__ bool extend_rungekutta_v3(const hipTextureObject_t field_d, const int64_t nx, const int64_t ny, const int64_t nz, const coord3d_d& prevpos, float step_length, 
coord3d_d& newpos) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; coord3d_d c0 = prevpos; coord3d_d k0; bool good = getvector_v3(c0, field_d, nx, ny, nz, k0); k0 = k0.normalised() * step_length; const coord3d_d c1 = c0 + k0 * 0.5; coord3d_d k1; good = getvector_v3(c1, field_d, nx, ny, nz, k1); if (!good) return false; k1 = k1.normalised() * step_length; const coord3d_d c2 = c0 + k1 * 0.5; coord3d_d k2; good = getvector_v3(c2, field_d, nx, ny, nz, k2); if (!good) return false; k2 = k2.normalised() * step_length; const coord3d_d c3 = c0 + k2; coord3d_d k3; good = getvector_v3(c3, field_d, nx, ny, nz, k3); if (!good) return false; k3 = k3.normalised() * step_length; const coord3d_d c4 = c0 + (k0 + k1 * 2.0 + k2 * 2.0 + k3) / 6.0; coord3d_d k4; good = getvector_v3(c4, field_d, nx, ny, nz, k4); if (!good) return false; newpos = c4; return true; } __device__ void complete_trajectory_v1(const hipPitchedPtr field_d, const int nx, const int ny, const int nz, coord3d_d* __restrict__ positions, int& index, int max_points_traj, float return_ratio, float step_length, bool& out_of_bounds) { // const int indx = threadIdx.x + blockIdx.x * blockDim.x; out_of_bounds = false; double dist2farthest = -1; // if this is set at 0 at declaration, the following while loop will never run if (index > 0) { for (int i = 0; i <= index; i++) dist2farthest = ::max(dist2farthest, (positions[i] - positions[0]).norm()); } // if we get to a point that is less than return_ratio of the longest distance in the trajectory while ((positions[index] - positions[0]).norm() > return_ratio * dist2farthest) { if (!extend_rungekutta_v1(field_d, nx, ny, nz, positions[index], step_length, positions[index + 1])) { out_of_bounds = true; // printf("%d: %d oob\n", indx, index); return; } index++; dist2farthest = ::max(dist2farthest, (positions[index] - positions[0]).norm()); if (index == max_points_traj - 2) { step_length *= 1.5; index = 0; dist2farthest = -1; } } // printf("%d: %d\n", indx, index); } 
__device__ void complete_trajectory_v2(const hipTextureObject_t field_x, const hipTextureObject_t field_y, const hipTextureObject_t field_z, const int nx, const int ny, const int nz, coord3d_d* __restrict__ positions, int& index, int max_points_traj, float return_ratio, float step_length, bool& out_of_bounds) { // const int indx = threadIdx.x + blockIdx.x * blockDim.x; out_of_bounds = false; double dist2farthest = -1; // if this is set at 0 at declaration, the following while loop will never run if (index > 0) { for (int i = 0; i <= index; i++) dist2farthest = ::max(dist2farthest, (positions[i] - positions[0]).norm()); } // if we get to a point that is less than return_ratio of the longest distance in the trajectory while ((positions[index] - positions[0]).norm() > return_ratio * dist2farthest) { if (!extend_rungekutta_v2(field_x, field_y, field_z, nx, ny, nz, positions[index], step_length, positions[index + 1])) { out_of_bounds = true; // printf("%d: %d oob\n", indx, index); return; } index++; dist2farthest = ::max(dist2farthest, (positions[index] - positions[0]).norm()); if (index == max_points_traj - 2) { step_length *= 1.5; index = 0; dist2farthest = -1; } } // printf("%d: %d\n", indx, index); } __device__ void complete_trajectory_v3(const hipTextureObject_t field_d, const int nx, const int ny, const int nz, coord3d_d* __restrict__ positions, int& index, int max_points_traj, float return_ratio, float step_length, bool& out_of_bounds) { // const int indx = threadIdx.x + blockIdx.x * blockDim.x; out_of_bounds = false; double dist2farthest = -1; // if this is set at 0 at declaration, the following while loop will never run if (index > 0) { for (int i = 0; i <= index; i++) dist2farthest = ::max(dist2farthest, (positions[i] - positions[0]).norm()); } // if we get to a point that is less than return_ratio of the longest distance in the trajectory while ((positions[index] - positions[0]).norm() > return_ratio * dist2farthest) { if (!extend_rungekutta_v3(field_d, nx, 
ny, nz, positions[index], step_length, positions[index + 1])) { out_of_bounds = true; // printf("%d: %d oob\n", indx, index); return; } index++; dist2farthest = ::max(dist2farthest, (positions[index] - positions[0]).norm()); if (index == max_points_traj - 2) { step_length *= 1.5; index = 0; dist2farthest = -1; } } // printf("%d: %d\n", indx, index); } __device__ Tropicity classify_trajectory(const coord3d_d* __restrict__ positions, int n_points_in_traj, Direction bfielddir, bool out_of_bounds) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; // printf("p in traj %d: %d\n", indx, n_points_in_traj); coord3d_d bfield; switch (bfielddir) { case Direction::pos_x: { bfield = coord3d_d(1, 0, 0); break; } case Direction::neg_x: { bfield = coord3d_d(-1, 0, 0); break; } case Direction::pos_y: { bfield = coord3d_d(0, 1, 0); break; } case Direction::neg_y: { bfield = coord3d_d(0, -1, 0); break; } case Direction::pos_z: { bfield = coord3d_d(0, 0, 1); break; } case Direction::neg_z: { bfield = coord3d_d(0, 0, -1); break; } default: { return Tropicity::input_error; } } if (out_of_bounds) return Tropicity::outofbounds; coord3d_d crosssum(0, 0, 0); for (size_t i = 0; i < n_points_in_traj; i++) { crosssum += positions[(i - 1 + n_points_in_traj) % n_points_in_traj].cross(positions[i]); } // crossum += positions[positions.size()-1].cross(positions[0]); // if (indx < 40) printf("cross: %f/%f/%f\n", crosssum[0], crosssum[1], crosssum[2]); double dot_product = bfield.dot(crosssum); if (dot_product > 0) return Tropicity::paratropic; else if (dot_product < 0) return Tropicity::diatropic; else return Tropicity::unclassifyable; } __global__ void classify_points_kernel_v1(coord3d_d* __restrict__ points, int64_t n_points, const hipPitchedPtr field_d, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d* __restrict__ trajectories_d, float step_length, int64_t max_points_traj, Direction bfielddir, Tropicity* __restrict__ tropicities_d) { const int32_t indx = threadIdx.x + 
blockIdx.x * blockDim.x; // if (indx < 2) printf("hello from the gpu: %d\n", indx); if (indx > n_points - 1) return; coord3d_d vec(0, 0, 0); // if (indx < 2) printf("pos %d %f/%f/%f\n", indx, points[indx][0], points[indx][1], points[indx][2]); bool good = getvector_v1(points[indx], field_d, nx, ny, nz, vec); // if (indx < 2) printf("found vec %d %d: %f/%f/%f\n", indx, good, vec[0], vec[1], vec[2]); if (!good) { tropicities_d[indx] = Tropicity::outofbounds; return; } bool out_of_bounds; int current_index_in_traj = 0; float return_ratio = 0.2; trajectories_d[max_points_traj * indx] = points[indx]; complete_trajectory_v1(field_d, nx, ny, nz, trajectories_d + max_points_traj * indx, current_index_in_traj, max_points_traj, return_ratio, step_length, out_of_bounds); tropicities_d[indx] = classify_trajectory(trajectories_d + max_points_traj * indx, current_index_in_traj + 1, bfielddir, out_of_bounds); } __global__ void classify_points_kernel_v2(coord3d_d* __restrict__ points, int64_t n_points, hipTextureObject_t field_x, hipTextureObject_t field_y, hipTextureObject_t field_z, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d* __restrict__ trajectories_d, float step_length, int64_t max_points_traj, Direction bfielddir, Tropicity* __restrict__ tropicities_d) { const int32_t indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) printf("hello from the gpu: %d\n", indx); if (indx > n_points - 1) return; coord3d_d vec(0, 0, 0); // if (indx < 2) printf("pos %d %f/%f/%f\n", indx, points[indx][0], points[indx][1], points[indx][2]); bool good = getvector_v2(points[indx], field_x, field_y, field_z, nx, ny, nz, vec); // if (indx < 2) printf("found vec %d %d: %f/%f/%f\n", indx, good, vec[0], vec[1], vec[2]); if (!good) { tropicities_d[indx] = Tropicity::outofbounds; return; } bool out_of_bounds; int current_index_in_traj = 0; float return_ratio = 0.2; trajectories_d[max_points_traj * indx] = points[indx]; complete_trajectory_v2(field_x, field_y, field_z, nx, ny, 
nz, trajectories_d + max_points_traj * indx, current_index_in_traj, max_points_traj, return_ratio, step_length, out_of_bounds); tropicities_d[indx] = classify_trajectory(trajectories_d + max_points_traj * indx, current_index_in_traj + 1, bfielddir, out_of_bounds); } __global__ void classify_points_kernel_v3(coord3d_d* __restrict__ points, int64_t n_points, hipTextureObject_t field_d, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d* __restrict__ trajectories_d, float step_length, int64_t max_points_traj, Direction bfielddir, Tropicity* __restrict__ tropicities_d) { const int32_t indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) printf("hello from the gpu: %d\n", indx); if (indx > n_points - 1) { return; } coord3d_d vec(0, 0, 0); // if (indx < 2) printf("pos %d %f/%f/%f\n", indx, points[indx][0], points[indx][1], points[indx][2]); bool good = getvector_v3(points[indx], field_d, nx, ny, nz, vec); // if (indx < 2) printf("found vec %d %d: %f/%f/%f\n", indx, good, vec[0], vec[1], vec[2]); if (!good) { tropicities_d[indx] = Tropicity::outofbounds; return; } bool out_of_bounds; int current_index_in_traj = 0; float return_ratio = 0.2; trajectories_d[max_points_traj * indx] = points[indx]; complete_trajectory_v3(field_d, nx, ny, nz, trajectories_d + max_points_traj * indx, current_index_in_traj, max_points_traj, return_ratio, step_length, out_of_bounds); tropicities_d[indx] = classify_trajectory(trajectories_d + max_points_traj * indx, current_index_in_traj + 1, bfielddir, out_of_bounds); } std::vector<Tropicity> classify_points_cudax_v1(const double* field_a, const int64_t nx, const int64_t ny, const int64_t nz, double* origin_a, double* spacing_a, const double* start_points_a, int64_t n_points, Direction bfielddir) { // std::cout << __PRETTY_FUNCTION__ << std::endl; #if 0 float steplength = 0.01; #else float step_length_ratio = 0.05; float step_length = step_length_ratio * spacing_a[0]; #endif int64_t max_points_traj = 10000; 
std::vector<Tropicity> res(n_points); coord3d_d* field = new coord3d_d[nx * ny * nz]; coord3d_d* start_points = new coord3d_d[n_points]; coord3d_d* start_points_d; Tropicity* res_d; coord3d_d* trajectories_d; for (int64_t i = 0; i < nx * ny * nz; i++) for (int64_t j = 0; j < 3; j++) field[i][j] = field_a[3 * i + j]; for (int64_t i = 0; i < n_points; i++) for (int64_t j = 0; j < 3; j++) start_points[i][j] = start_points_a[3 * i + j]; hipPitchedPtr field_d; hipExtent field_extent = make_hipExtent(nx * sizeof(coord3d_d), ny, nz); hipMalloc3D(&field_d, field_extent); hipMemcpy3DParms memCopyParameters = {0}; memCopyParameters.srcPtr = make_hipPitchedPtr(field, nx * sizeof(coord3d_d), ny, nz); memCopyParameters.dstPtr = field_d; memCopyParameters.extent = field_extent; memCopyParameters.kind = hipMemcpyHostToDevice; hipMemcpy3DAsync(&memCopyParameters, 0); // printf("nx, ny, nz %lu, %lu, %lu\n", field_d.pitch, field_d.xsize, field_d.ysize); // alloc hipMalloc((void**)&start_points_d, n_points * sizeof(coord3d_d)); hipMalloc((void**)&trajectories_d, n_points * max_points_traj * sizeof(coord3d_d)); hipMalloc((void**)&res_d, n_points * sizeof(Tropicity)); // copy to device hipMemcpy(start_points_d, start_points, n_points * sizeof(coord3d_d), hipMemcpyHostToDevice); // cout << "e " << hipGetLastError() << endl; int block_size = 256; int grid_size = n_points / block_size + (n_points % block_size != 0); // std::cout << "points / gridsize / blocksize: " << n_points << ", " << grid_size << ", " << block_size << std::endl; hipLaunchKernelGGL(( classify_points_kernel_v1), dim3(grid_size), dim3(block_size), 0, 0, start_points_d, n_points, field_d, nx, ny, nz, trajectories_d, step_length, max_points_traj, bfielddir, res_d); // cout << "e " << hipGetLastError() << endl; // copy from device hipMemcpy(res.data(), res_d, n_points * sizeof(Tropicity), hipMemcpyDeviceToHost); // dealloc hipFree(field_d.ptr); hipFree(start_points_d); hipFree(trajectories_d); hipFree(res_d); delete[] 
field; delete[] start_points; return res; } std::vector<Tropicity> classify_points_cudax_v2(double* field_x_a, double* field_y_a, double* field_z_a, const int64_t nx, const int64_t ny, const int64_t nz, double* origin_a, double* spacing_a, const double* start_points_a, int64_t n_points, Direction bfielddir) { // std::cout << __PRETTY_FUNCTION__ << std::endl; #if 0 float steplength = 0.01; #else float step_length_ratio = 0.05; float step_length = step_length_ratio * spacing_a[0]; #endif int64_t max_points_traj = 10000; std::vector<Tropicity> res(n_points); coord3d_d* start_points = new coord3d_d[n_points]; coord3d_d* start_points_d; Tropicity* res_d; coord3d_d* trajectories_d; for (int64_t i = 0; i < n_points; i++) for (int64_t j = 0; j < 3; j++) start_points[i][j] = start_points_a[3 * i + j]; hipArray_t field_x_d, field_y_d, field_z_d; // hipChannelFormatDesc desc = hipCreateChannelDesc<double>(); hipChannelFormatDesc desc = hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned); // we pretend to store int2 instead of double hipExtent field_extent = make_hipExtent(nx, ny, nz); hipMalloc3DArray(&field_x_d, &desc, field_extent); hipMalloc3DArray(&field_y_d, &desc, field_extent); hipMalloc3DArray(&field_z_d, &desc, field_extent); hipMemcpy3DParms memCopyParametersX = {0}; memCopyParametersX.srcPtr = make_hipPitchedPtr(field_x_a, nx * sizeof(double), nx, ny); memCopyParametersX.dstArray = field_x_d; memCopyParametersX.extent = field_extent; memCopyParametersX.kind = hipMemcpyHostToDevice; // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipMemcpy3DParms memCopyParametersY = {0}; memCopyParametersY.srcPtr = make_hipPitchedPtr(field_y_a, nx * sizeof(double), nx, ny); memCopyParametersY.dstArray = field_y_d; memCopyParametersY.extent = field_extent; memCopyParametersY.kind = hipMemcpyHostToDevice; // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipMemcpy3DParms memCopyParametersZ = {0}; memCopyParametersZ.srcPtr = 
make_hipPitchedPtr(field_z_a, nx * sizeof(double), nx, ny); memCopyParametersZ.dstArray = field_z_d; memCopyParametersZ.extent = field_extent; memCopyParametersZ.kind = hipMemcpyHostToDevice; // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipMemcpy3DAsync(&memCopyParametersX, 0); hipMemcpy3DAsync(&memCopyParametersY, 0); hipMemcpy3DAsync(&memCopyParametersZ, 0); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; // printf("nx, ny, nz %lu, %lu, %lu\n", field_d.pitch, field_d.xsize, field_d.ysize); // prepare textures struct hipResourceDesc fieldXResDesc; memset(&fieldXResDesc, 0, sizeof(fieldXResDesc)); fieldXResDesc.resType = hipResourceTypeArray; fieldXResDesc.res.array.array = field_x_d; struct hipResourceDesc fieldYResDesc; memset(&fieldYResDesc, 0, sizeof(fieldYResDesc)); fieldYResDesc.resType = hipResourceTypeArray; fieldYResDesc.res.array.array = field_y_d; struct hipResourceDesc fieldZResDesc; memset(&fieldZResDesc, 0, sizeof(fieldZResDesc)); fieldZResDesc.resType = hipResourceTypeArray; fieldZResDesc.res.array.array = field_z_d; struct hipTextureDesc fieldTexDesc; memset(&fieldTexDesc, 0, sizeof(fieldTexDesc)); fieldTexDesc.addressMode[0] = hipAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.addressMode[1] = hipAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.addressMode[2] = hipAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.filterMode = hipFilterModePoint; // ie, interpolate linearly fieldTexDesc.readMode = hipReadModeElementType; fieldTexDesc.normalizedCoords = 0; hipTextureObject_t fieldXTexture = 0, fieldYTexture = 0, fieldZTexture = 0; hipCreateTextureObject(&fieldXTexture, &fieldXResDesc, &fieldTexDesc, nullptr); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipCreateTextureObject(&fieldYTexture, &fieldYResDesc, &fieldTexDesc, nullptr); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipCreateTextureObject(&fieldZTexture, 
&fieldZResDesc, &fieldTexDesc, nullptr); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; // alloc hipMalloc((void**)&start_points_d, n_points * sizeof(coord3d_d)); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipMalloc((void**)&trajectories_d, n_points * max_points_traj * sizeof(coord3d_d)); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipMalloc((void**)&res_d, n_points * sizeof(Tropicity)); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; // copy to device hipMemcpy(start_points_d, start_points, n_points * sizeof(coord3d_d), hipMemcpyHostToDevice); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; int block_size = 256; int grid_size = n_points / block_size + (n_points % block_size != 0); // std::cout << "points / gridsize / blocksize: " << n_points << ", " << grid_size << ", " << block_size << std::endl; hipLaunchKernelGGL(( classify_points_kernel_v2), dim3(grid_size), dim3(block_size), 0, 0, start_points_d, n_points, fieldXTexture, fieldYTexture, fieldZTexture, nx, ny, nz, trajectories_d, step_length, max_points_traj, bfielddir, res_d); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; // copy from device hipMemcpy(res.data(), res_d, n_points * sizeof(Tropicity), hipMemcpyDeviceToHost); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipDestroyTextureObject(fieldXTexture); hipDestroyTextureObject(fieldYTexture); hipDestroyTextureObject(fieldZTexture); // dealloc hipFree(start_points_d); hipFree(trajectories_d); hipFree(res_d); delete[] start_points; return res; } std::vector<Tropicity> classify_points_cudax_v3(float* field_x_a, float* field_y_a, float* field_z_a, const int64_t nx, const int64_t ny, const int64_t nz, double* origin_a, double* spacing_a, const double* start_points_a, int64_t n_points, Direction bfielddir) { // std::cout << __PRETTY_FUNCTION__ << std::endl; #if 0 float steplength = 0.01; #else float step_length_ratio = 0.05; float step_length = 
step_length_ratio * spacing_a[0]; #endif int64_t max_points_traj = 10000; std::vector<Tropicity> res(n_points); coord3d_d* start_points = new coord3d_d[n_points]; coord3d_d* start_points_d; Tropicity* res_d; coord3d_d* trajectories_d; for (int64_t i = 0; i < n_points; i++) for (int64_t j = 0; j < 3; j++) start_points[i][j] = start_points_a[3 * i + j]; float4* field_float4 = new float4[nx * ny * nz]; for (int64_t i = 0; i < nx * ny * nz; i++) { field_float4[i].x = field_x_a[i]; field_float4[i].y = field_y_a[i]; field_float4[i].z = field_z_a[i]; } hipArray_t field_d; hipChannelFormatDesc desc = hipCreateChannelDesc<float4>(); // hipChannelFormatDesc desc = hipCreateChannelDesc(32, 32, 32, 0, hipChannelFormatKindFloat); hipExtent field_extent = make_hipExtent(nx, ny, nz); hipMalloc3DArray(&field_d, &desc, field_extent); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipMemcpy3DParms memCopyParameters = {0}; memCopyParameters.srcPtr = make_hipPitchedPtr(field_float4, nx * sizeof(float4), nx, ny); memCopyParameters.dstArray = field_d; memCopyParameters.extent = field_extent; memCopyParameters.kind = hipMemcpyHostToDevice; // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipMemcpy3DAsync(&memCopyParameters, 0); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; // printf("nx, ny, nz %lu, %lu, %lu\n", field_d.pitch, field_d.xsize, field_d.ysize); // prepare textures struct hipResourceDesc fieldResDesc; memset(&fieldResDesc, 0, sizeof(fieldResDesc)); fieldResDesc.resType = hipResourceTypeArray; fieldResDesc.res.array.array = field_d; struct hipTextureDesc fieldTexDesc; memset(&fieldTexDesc, 0, sizeof(fieldTexDesc)); fieldTexDesc.addressMode[0] = hipAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.addressMode[1] = hipAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.addressMode[2] = hipAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.filterMode = hipFilterModeLinear; // 
ie, interpolate linearly fieldTexDesc.readMode = hipReadModeElementType; fieldTexDesc.normalizedCoords = 0; hipTextureObject_t fieldTexture = 0; hipCreateTextureObject(&fieldTexture, &fieldResDesc, &fieldTexDesc, nullptr); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; // alloc hipMalloc((void**)&start_points_d, n_points * sizeof(coord3d_d)); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipMalloc((void**)&trajectories_d, n_points * max_points_traj * sizeof(coord3d_d)); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipMalloc((void**)&res_d, n_points * sizeof(Tropicity)); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; // copy to device hipMemcpy(start_points_d, start_points, n_points * sizeof(coord3d_d), hipMemcpyHostToDevice); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; int block_size = 256; int grid_size = n_points / block_size + (n_points % block_size != 0); // std::cout << "points / gridsize / blocksize: " << n_points << ", " << grid_size << ", " << block_size << std::endl; hipLaunchKernelGGL(( classify_points_kernel_v3), dim3(grid_size), dim3(block_size), 0, 0, start_points_d, n_points, fieldTexture, nx, ny, nz, trajectories_d, step_length, max_points_traj, bfielddir, res_d); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; // copy from device hipMemcpy(res.data(), res_d, n_points * sizeof(Tropicity), hipMemcpyDeviceToHost); // cout << "e " << hipGetErrorName(hipGetLastError()) << endl; hipDestroyTextureObject(fieldTexture); // dealloc hipFree(start_points_d); hipFree(trajectories_d); hipFree(res_d); delete[] start_points; return res; }
cb3b55cb8056c439127b92de3c7b56366aaaae49.cu
#include <cmath> #include <fstream> #include <iostream> #include <vector> #include "dir-enum.hh" #include "geometry3_d.hh" #include "plane.hh" #include "trop-enum.hh" #include <cuda.h> #include <cuda_runtime_api.h> // #include <vector_functions.h> __device__ inline double3 operator*(const double d, const double3 d3) { const double xval = d * d3.x; const double yval = d * d3.y; const double zval = d * d3.z; return make_double3(xval, yval, zval); } __device__ inline double3 operator+(const double3 d3a, const double3 d3b) { const double xval = d3a.x * d3b.x; const double yval = d3a.y * d3b.y; const double zval = d3a.z * d3b.z; return make_double3(xval, yval, zval); } __device__ bool outofbounds(const coord3d_d& pos, int64_t nx, int64_t ny, int64_t nz) { if (pos.x[0] >= nx - 1 || pos.x[1] >= ny - 1 || pos.x[2] >= nz - 1 || pos.x[0] < 0 || pos.x[1] < 0 || pos.x[2] < 0) { return true; } return false; } // trilinear interpolation __device__ bool getvector_v1(const coord3d_d& pos, const cudaPitchedPtr field_d, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d& res_vec) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) { // printf("x %f, y %f, z %f \n", pos[0], pos[1], pos[2]); // } char* ptr = (char*)field_d.ptr; // pitch being xdim * sizeof(thing) rounded up to a multiple of 32 size_t pitch = field_d.pitch; if (outofbounds(pos, nx, ny, nz)) return false; double x = pos[0]; double y = pos[1]; double z = pos[2]; int x0 = int(floor(pos[0])); int x1 = x0 + 1; int y0 = int(floor(pos[1])); int y1 = y0 + 1; int z0 = int(floor(pos[2])); int z1 = z0 + 1; coord3d_d v000 = *(coord3d_d*)(ptr + (pitch * ny * z0 + pitch * y0 + x0 * sizeof(coord3d_d))); coord3d_d v001 = *(coord3d_d*)(ptr + (pitch * ny * z1 + pitch * y0 + x0 * sizeof(coord3d_d))); coord3d_d v010 = *(coord3d_d*)(ptr + (pitch * ny * z0 + pitch * y1 + x0 * sizeof(coord3d_d))); coord3d_d v011 = *(coord3d_d*)(ptr + (pitch * ny * z1 + pitch * y1 + x0 * sizeof(coord3d_d))); coord3d_d v100 = 
*(coord3d_d*)(ptr + (pitch * ny * z0 + pitch * y0 + x1 * sizeof(coord3d_d))); coord3d_d v101 = *(coord3d_d*)(ptr + (pitch * ny * z1 + pitch * y0 + x1 * sizeof(coord3d_d))); coord3d_d v110 = *(coord3d_d*)(ptr + (pitch * ny * z0 + pitch * y1 + x1 * sizeof(coord3d_d))); coord3d_d v111 = *(coord3d_d*)(ptr + (pitch * ny * z1 + pitch * y1 + x1 * sizeof(coord3d_d))); coord3d_d aux0 = (x1 - x) * v000 + (x - x0) * v100; coord3d_d aux1 = (x1 - x) * v010 + (x - x0) * v110; coord3d_d aux2 = (x1 - x) * v001 + (x - x0) * v101; coord3d_d aux3 = (x1 - x) * v011 + (x - x0) * v111; coord3d_d aux4 = (y1 - y) * aux0 + (y - y0) * aux1; coord3d_d aux5 = (y1 - y) * aux2 + (y - y0) * aux3; res_vec = (z1 - z) * aux4 + (z - z0) * aux5; return true; } __device__ inline coord3d_d get_val_v2(const int x, const int y, const int z, cudaTextureObject_t field_x, cudaTextureObject_t field_y, cudaTextureObject_t field_z) { int2 xvalint2 = tex3D<int2>(field_x, x, y, z); double xval = __hiloint2double(xvalint2.y, xvalint2.x); int2 yvalint2 = tex3D<int2>(field_y, x, y, z); double yval = __hiloint2double(yvalint2.y, yvalint2.x); int2 zvalint2 = tex3D<int2>(field_z, x, y, z); double zval = __hiloint2double(zvalint2.y, zvalint2.x); return coord3d_d(xval, yval, zval); } // trilinear interpolation __device__ bool getvector_v2(const coord3d_d& pos, cudaTextureObject_t field_x, cudaTextureObject_t field_y, cudaTextureObject_t field_z, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d& res_vec) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) { // printf("x %f, y %f, z %f \n", pos[0], pos[1], pos[2]); // } if (outofbounds(pos, nx, ny, nz)) return false; double x = pos[0]; double y = pos[1]; double z = pos[2]; int x0 = int(floor(pos[0])); int x1 = x0 + 1; int y0 = int(floor(pos[1])); int y1 = y0 + 1; int z0 = int(floor(pos[2])); int z1 = z0 + 1; coord3d_d v000 = get_val_v2(x0, y0, z0, field_x, field_y, field_z); coord3d_d v001 = get_val_v2(x0, y0, z1, field_x, field_y, 
field_z); coord3d_d v010 = get_val_v2(x0, y1, z0, field_x, field_y, field_z); coord3d_d v011 = get_val_v2(x0, y1, z1, field_x, field_y, field_z); coord3d_d v100 = get_val_v2(x1, y0, z0, field_x, field_y, field_z); coord3d_d v101 = get_val_v2(x1, y0, z1, field_x, field_y, field_z); coord3d_d v110 = get_val_v2(x1, y1, z0, field_x, field_y, field_z); coord3d_d v111 = get_val_v2(x1, y1, z1, field_x, field_y, field_z); coord3d_d aux0 = (x1 - x) * v000 + (x - x0) * v100; coord3d_d aux1 = (x1 - x) * v010 + (x - x0) * v110; coord3d_d aux2 = (x1 - x) * v001 + (x - x0) * v101; coord3d_d aux3 = (x1 - x) * v011 + (x - x0) * v111; coord3d_d aux4 = (y1 - y) * aux0 + (y - y0) * aux1; coord3d_d aux5 = (y1 - y) * aux2 + (y - y0) * aux3; res_vec = (z1 - z) * aux4 + (z - z0) * aux5; return true; } // trilinear interpolation __device__ inline bool getvector_v3(const coord3d_d& pos, cudaTextureObject_t field_d, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d& res_vec) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) { // printf("x %f, y %f, z %f \n", pos[0], pos[1], pos[2]); // } if (outofbounds(pos, nx, ny, nz)) return false; double x = pos[0]; double y = pos[1]; double z = pos[2]; float4 val = tex3D<float4>(field_d, x, y, z); res_vec = coord3d_d(val.x, val.y, val.z); return true; } // Runge-Kutta method, 4th order // c --> positions, k --> vectors at c __device__ bool extend_rungekutta_v1(const cudaPitchedPtr field_d, const int64_t nx, const int64_t ny, const int64_t nz, const coord3d_d& prevpos, float step_length, coord3d_d& newpos) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; coord3d_d c0 = prevpos; coord3d_d k0; bool good = getvector_v1(c0, field_d, nx, ny, nz, k0); k0 = k0.normalised() * step_length; const coord3d_d c1 = c0 + k0 * 0.5; coord3d_d k1; good = getvector_v1(c1, field_d, nx, ny, nz, k1); if (!good) return false; k1 = k1.normalised() * step_length; const coord3d_d c2 = c0 + k1 * 0.5; coord3d_d k2; good = getvector_v1(c2, 
field_d, nx, ny, nz, k2); if (!good) return false; k2 = k2.normalised() * step_length; const coord3d_d c3 = c0 + k2; coord3d_d k3; good = getvector_v1(c3, field_d, nx, ny, nz, k3); if (!good) return false; k3 = k3.normalised() * step_length; const coord3d_d c4 = c0 + (k0 + k1 * 2.0 + k2 * 2.0 + k3) / 6.0; coord3d_d k4; good = getvector_v1(c4, field_d, nx, ny, nz, k4); if (!good) return false; newpos = c4; return true; } // Runge-Kutta method, 4th order // c --> positions, k --> vectors at c __device__ bool extend_rungekutta_v2(const cudaTextureObject_t field_x, const cudaTextureObject_t field_y, const cudaTextureObject_t field_z, const int64_t nx, const int64_t ny, const int64_t nz, const coord3d_d& prevpos, float step_length, coord3d_d& newpos) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; coord3d_d c0 = prevpos; coord3d_d k0; bool good = getvector_v2(c0, field_x, field_y, field_z, nx, ny, nz, k0); k0 = k0.normalised() * step_length; const coord3d_d c1 = c0 + k0 * 0.5; coord3d_d k1; good = getvector_v2(c1, field_x, field_y, field_z, nx, ny, nz, k1); if (!good) return false; k1 = k1.normalised() * step_length; const coord3d_d c2 = c0 + k1 * 0.5; coord3d_d k2; good = getvector_v2(c2, field_x, field_y, field_z, nx, ny, nz, k2); if (!good) return false; k2 = k2.normalised() * step_length; const coord3d_d c3 = c0 + k2; coord3d_d k3; good = getvector_v2(c3, field_x, field_y, field_z, nx, ny, nz, k3); if (!good) return false; k3 = k3.normalised() * step_length; const coord3d_d c4 = c0 + (k0 + k1 * 2.0 + k2 * 2.0 + k3) / 6.0; coord3d_d k4; good = getvector_v2(c4, field_x, field_y, field_z, nx, ny, nz, k4); if (!good) return false; newpos = c4; return true; } // Runge-Kutta method, 4th order // c --> positions, k --> vectors at c __device__ bool extend_rungekutta_v3(const cudaTextureObject_t field_d, const int64_t nx, const int64_t ny, const int64_t nz, const coord3d_d& prevpos, float step_length, coord3d_d& newpos) { // int indx = threadIdx.x + blockIdx.x * 
blockDim.x; coord3d_d c0 = prevpos; coord3d_d k0; bool good = getvector_v3(c0, field_d, nx, ny, nz, k0); k0 = k0.normalised() * step_length; const coord3d_d c1 = c0 + k0 * 0.5; coord3d_d k1; good = getvector_v3(c1, field_d, nx, ny, nz, k1); if (!good) return false; k1 = k1.normalised() * step_length; const coord3d_d c2 = c0 + k1 * 0.5; coord3d_d k2; good = getvector_v3(c2, field_d, nx, ny, nz, k2); if (!good) return false; k2 = k2.normalised() * step_length; const coord3d_d c3 = c0 + k2; coord3d_d k3; good = getvector_v3(c3, field_d, nx, ny, nz, k3); if (!good) return false; k3 = k3.normalised() * step_length; const coord3d_d c4 = c0 + (k0 + k1 * 2.0 + k2 * 2.0 + k3) / 6.0; coord3d_d k4; good = getvector_v3(c4, field_d, nx, ny, nz, k4); if (!good) return false; newpos = c4; return true; } __device__ void complete_trajectory_v1(const cudaPitchedPtr field_d, const int nx, const int ny, const int nz, coord3d_d* __restrict__ positions, int& index, int max_points_traj, float return_ratio, float step_length, bool& out_of_bounds) { // const int indx = threadIdx.x + blockIdx.x * blockDim.x; out_of_bounds = false; double dist2farthest = -1; // if this is set at 0 at declaration, the following while loop will never run if (index > 0) { for (int i = 0; i <= index; i++) dist2farthest = std::max(dist2farthest, (positions[i] - positions[0]).norm()); } // if we get to a point that is less than return_ratio of the longest distance in the trajectory while ((positions[index] - positions[0]).norm() > return_ratio * dist2farthest) { if (!extend_rungekutta_v1(field_d, nx, ny, nz, positions[index], step_length, positions[index + 1])) { out_of_bounds = true; // printf("%d: %d oob\n", indx, index); return; } index++; dist2farthest = std::max(dist2farthest, (positions[index] - positions[0]).norm()); if (index == max_points_traj - 2) { step_length *= 1.5; index = 0; dist2farthest = -1; } } // printf("%d: %d\n", indx, index); } __device__ void complete_trajectory_v2(const cudaTextureObject_t 
field_x, const cudaTextureObject_t field_y, const cudaTextureObject_t field_z, const int nx, const int ny, const int nz, coord3d_d* __restrict__ positions, int& index, int max_points_traj, float return_ratio, float step_length, bool& out_of_bounds) { // const int indx = threadIdx.x + blockIdx.x * blockDim.x; out_of_bounds = false; double dist2farthest = -1; // if this is set at 0 at declaration, the following while loop will never run if (index > 0) { for (int i = 0; i <= index; i++) dist2farthest = std::max(dist2farthest, (positions[i] - positions[0]).norm()); } // if we get to a point that is less than return_ratio of the longest distance in the trajectory while ((positions[index] - positions[0]).norm() > return_ratio * dist2farthest) { if (!extend_rungekutta_v2(field_x, field_y, field_z, nx, ny, nz, positions[index], step_length, positions[index + 1])) { out_of_bounds = true; // printf("%d: %d oob\n", indx, index); return; } index++; dist2farthest = std::max(dist2farthest, (positions[index] - positions[0]).norm()); if (index == max_points_traj - 2) { step_length *= 1.5; index = 0; dist2farthest = -1; } } // printf("%d: %d\n", indx, index); } __device__ void complete_trajectory_v3(const cudaTextureObject_t field_d, const int nx, const int ny, const int nz, coord3d_d* __restrict__ positions, int& index, int max_points_traj, float return_ratio, float step_length, bool& out_of_bounds) { // const int indx = threadIdx.x + blockIdx.x * blockDim.x; out_of_bounds = false; double dist2farthest = -1; // if this is set at 0 at declaration, the following while loop will never run if (index > 0) { for (int i = 0; i <= index; i++) dist2farthest = std::max(dist2farthest, (positions[i] - positions[0]).norm()); } // if we get to a point that is less than return_ratio of the longest distance in the trajectory while ((positions[index] - positions[0]).norm() > return_ratio * dist2farthest) { if (!extend_rungekutta_v3(field_d, nx, ny, nz, positions[index], step_length, 
positions[index + 1])) { out_of_bounds = true; // printf("%d: %d oob\n", indx, index); return; } index++; dist2farthest = std::max(dist2farthest, (positions[index] - positions[0]).norm()); if (index == max_points_traj - 2) { step_length *= 1.5; index = 0; dist2farthest = -1; } } // printf("%d: %d\n", indx, index); } __device__ Tropicity classify_trajectory(const coord3d_d* __restrict__ positions, int n_points_in_traj, Direction bfielddir, bool out_of_bounds) { // int indx = threadIdx.x + blockIdx.x * blockDim.x; // printf("p in traj %d: %d\n", indx, n_points_in_traj); coord3d_d bfield; switch (bfielddir) { case Direction::pos_x: { bfield = coord3d_d(1, 0, 0); break; } case Direction::neg_x: { bfield = coord3d_d(-1, 0, 0); break; } case Direction::pos_y: { bfield = coord3d_d(0, 1, 0); break; } case Direction::neg_y: { bfield = coord3d_d(0, -1, 0); break; } case Direction::pos_z: { bfield = coord3d_d(0, 0, 1); break; } case Direction::neg_z: { bfield = coord3d_d(0, 0, -1); break; } default: { return Tropicity::input_error; } } if (out_of_bounds) return Tropicity::outofbounds; coord3d_d crosssum(0, 0, 0); for (size_t i = 0; i < n_points_in_traj; i++) { crosssum += positions[(i - 1 + n_points_in_traj) % n_points_in_traj].cross(positions[i]); } // crossum += positions[positions.size()-1].cross(positions[0]); // if (indx < 40) printf("cross: %f/%f/%f\n", crosssum[0], crosssum[1], crosssum[2]); double dot_product = bfield.dot(crosssum); if (dot_product > 0) return Tropicity::paratropic; else if (dot_product < 0) return Tropicity::diatropic; else return Tropicity::unclassifyable; } __global__ void classify_points_kernel_v1(coord3d_d* __restrict__ points, int64_t n_points, const cudaPitchedPtr field_d, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d* __restrict__ trajectories_d, float step_length, int64_t max_points_traj, Direction bfielddir, Tropicity* __restrict__ tropicities_d) { const int32_t indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) 
printf("hello from the gpu: %d\n", indx); if (indx > n_points - 1) return; coord3d_d vec(0, 0, 0); // if (indx < 2) printf("pos %d %f/%f/%f\n", indx, points[indx][0], points[indx][1], points[indx][2]); bool good = getvector_v1(points[indx], field_d, nx, ny, nz, vec); // if (indx < 2) printf("found vec %d %d: %f/%f/%f\n", indx, good, vec[0], vec[1], vec[2]); if (!good) { tropicities_d[indx] = Tropicity::outofbounds; return; } bool out_of_bounds; int current_index_in_traj = 0; float return_ratio = 0.2; trajectories_d[max_points_traj * indx] = points[indx]; complete_trajectory_v1(field_d, nx, ny, nz, trajectories_d + max_points_traj * indx, current_index_in_traj, max_points_traj, return_ratio, step_length, out_of_bounds); tropicities_d[indx] = classify_trajectory(trajectories_d + max_points_traj * indx, current_index_in_traj + 1, bfielddir, out_of_bounds); } __global__ void classify_points_kernel_v2(coord3d_d* __restrict__ points, int64_t n_points, cudaTextureObject_t field_x, cudaTextureObject_t field_y, cudaTextureObject_t field_z, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d* __restrict__ trajectories_d, float step_length, int64_t max_points_traj, Direction bfielddir, Tropicity* __restrict__ tropicities_d) { const int32_t indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) printf("hello from the gpu: %d\n", indx); if (indx > n_points - 1) return; coord3d_d vec(0, 0, 0); // if (indx < 2) printf("pos %d %f/%f/%f\n", indx, points[indx][0], points[indx][1], points[indx][2]); bool good = getvector_v2(points[indx], field_x, field_y, field_z, nx, ny, nz, vec); // if (indx < 2) printf("found vec %d %d: %f/%f/%f\n", indx, good, vec[0], vec[1], vec[2]); if (!good) { tropicities_d[indx] = Tropicity::outofbounds; return; } bool out_of_bounds; int current_index_in_traj = 0; float return_ratio = 0.2; trajectories_d[max_points_traj * indx] = points[indx]; complete_trajectory_v2(field_x, field_y, field_z, nx, ny, nz, trajectories_d + max_points_traj * 
indx, current_index_in_traj, max_points_traj, return_ratio, step_length, out_of_bounds); tropicities_d[indx] = classify_trajectory(trajectories_d + max_points_traj * indx, current_index_in_traj + 1, bfielddir, out_of_bounds); } __global__ void classify_points_kernel_v3(coord3d_d* __restrict__ points, int64_t n_points, cudaTextureObject_t field_d, const int64_t nx, const int64_t ny, const int64_t nz, coord3d_d* __restrict__ trajectories_d, float step_length, int64_t max_points_traj, Direction bfielddir, Tropicity* __restrict__ tropicities_d) { const int32_t indx = threadIdx.x + blockIdx.x * blockDim.x; // if (indx < 2) printf("hello from the gpu: %d\n", indx); if (indx > n_points - 1) { return; } coord3d_d vec(0, 0, 0); // if (indx < 2) printf("pos %d %f/%f/%f\n", indx, points[indx][0], points[indx][1], points[indx][2]); bool good = getvector_v3(points[indx], field_d, nx, ny, nz, vec); // if (indx < 2) printf("found vec %d %d: %f/%f/%f\n", indx, good, vec[0], vec[1], vec[2]); if (!good) { tropicities_d[indx] = Tropicity::outofbounds; return; } bool out_of_bounds; int current_index_in_traj = 0; float return_ratio = 0.2; trajectories_d[max_points_traj * indx] = points[indx]; complete_trajectory_v3(field_d, nx, ny, nz, trajectories_d + max_points_traj * indx, current_index_in_traj, max_points_traj, return_ratio, step_length, out_of_bounds); tropicities_d[indx] = classify_trajectory(trajectories_d + max_points_traj * indx, current_index_in_traj + 1, bfielddir, out_of_bounds); } std::vector<Tropicity> classify_points_cudax_v1(const double* field_a, const int64_t nx, const int64_t ny, const int64_t nz, double* origin_a, double* spacing_a, const double* start_points_a, int64_t n_points, Direction bfielddir) { // std::cout << __PRETTY_FUNCTION__ << std::endl; #if 0 float steplength = 0.01; #else float step_length_ratio = 0.05; float step_length = step_length_ratio * spacing_a[0]; #endif int64_t max_points_traj = 10000; std::vector<Tropicity> res(n_points); coord3d_d* field 
= new coord3d_d[nx * ny * nz]; coord3d_d* start_points = new coord3d_d[n_points]; coord3d_d* start_points_d; Tropicity* res_d; coord3d_d* trajectories_d; for (int64_t i = 0; i < nx * ny * nz; i++) for (int64_t j = 0; j < 3; j++) field[i][j] = field_a[3 * i + j]; for (int64_t i = 0; i < n_points; i++) for (int64_t j = 0; j < 3; j++) start_points[i][j] = start_points_a[3 * i + j]; cudaPitchedPtr field_d; cudaExtent field_extent = make_cudaExtent(nx * sizeof(coord3d_d), ny, nz); cudaMalloc3D(&field_d, field_extent); cudaMemcpy3DParms memCopyParameters = {0}; memCopyParameters.srcPtr = make_cudaPitchedPtr(field, nx * sizeof(coord3d_d), ny, nz); memCopyParameters.dstPtr = field_d; memCopyParameters.extent = field_extent; memCopyParameters.kind = cudaMemcpyHostToDevice; cudaMemcpy3DAsync(&memCopyParameters, 0); // printf("nx, ny, nz %lu, %lu, %lu\n", field_d.pitch, field_d.xsize, field_d.ysize); // alloc cudaMalloc((void**)&start_points_d, n_points * sizeof(coord3d_d)); cudaMalloc((void**)&trajectories_d, n_points * max_points_traj * sizeof(coord3d_d)); cudaMalloc((void**)&res_d, n_points * sizeof(Tropicity)); // copy to device cudaMemcpy(start_points_d, start_points, n_points * sizeof(coord3d_d), cudaMemcpyHostToDevice); // cout << "e " << cudaGetLastError() << endl; int block_size = 256; int grid_size = n_points / block_size + (n_points % block_size != 0); // std::cout << "points / gridsize / blocksize: " << n_points << ", " << grid_size << ", " << block_size << std::endl; classify_points_kernel_v1<<<grid_size, block_size>>>(start_points_d, n_points, field_d, nx, ny, nz, trajectories_d, step_length, max_points_traj, bfielddir, res_d); // cout << "e " << cudaGetLastError() << endl; // copy from device cudaMemcpy(res.data(), res_d, n_points * sizeof(Tropicity), cudaMemcpyDeviceToHost); // dealloc cudaFree(field_d.ptr); cudaFree(start_points_d); cudaFree(trajectories_d); cudaFree(res_d); delete[] field; delete[] start_points; return res; } std::vector<Tropicity> 
classify_points_cudax_v2(double* field_x_a, double* field_y_a, double* field_z_a, const int64_t nx, const int64_t ny, const int64_t nz, double* origin_a, double* spacing_a, const double* start_points_a, int64_t n_points, Direction bfielddir) { // std::cout << __PRETTY_FUNCTION__ << std::endl; #if 0 float steplength = 0.01; #else float step_length_ratio = 0.05; float step_length = step_length_ratio * spacing_a[0]; #endif int64_t max_points_traj = 10000; std::vector<Tropicity> res(n_points); coord3d_d* start_points = new coord3d_d[n_points]; coord3d_d* start_points_d; Tropicity* res_d; coord3d_d* trajectories_d; for (int64_t i = 0; i < n_points; i++) for (int64_t j = 0; j < 3; j++) start_points[i][j] = start_points_a[3 * i + j]; cudaArray_t field_x_d, field_y_d, field_z_d; // cudaChannelFormatDesc desc = cudaCreateChannelDesc<double>(); cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned); // we pretend to store int2 instead of double cudaExtent field_extent = make_cudaExtent(nx, ny, nz); cudaMalloc3DArray(&field_x_d, &desc, field_extent); cudaMalloc3DArray(&field_y_d, &desc, field_extent); cudaMalloc3DArray(&field_z_d, &desc, field_extent); cudaMemcpy3DParms memCopyParametersX = {0}; memCopyParametersX.srcPtr = make_cudaPitchedPtr(field_x_a, nx * sizeof(double), nx, ny); memCopyParametersX.dstArray = field_x_d; memCopyParametersX.extent = field_extent; memCopyParametersX.kind = cudaMemcpyHostToDevice; // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaMemcpy3DParms memCopyParametersY = {0}; memCopyParametersY.srcPtr = make_cudaPitchedPtr(field_y_a, nx * sizeof(double), nx, ny); memCopyParametersY.dstArray = field_y_d; memCopyParametersY.extent = field_extent; memCopyParametersY.kind = cudaMemcpyHostToDevice; // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaMemcpy3DParms memCopyParametersZ = {0}; memCopyParametersZ.srcPtr = make_cudaPitchedPtr(field_z_a, nx * sizeof(double), nx, ny); 
memCopyParametersZ.dstArray = field_z_d; memCopyParametersZ.extent = field_extent; memCopyParametersZ.kind = cudaMemcpyHostToDevice; // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaMemcpy3DAsync(&memCopyParametersX, 0); cudaMemcpy3DAsync(&memCopyParametersY, 0); cudaMemcpy3DAsync(&memCopyParametersZ, 0); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; // printf("nx, ny, nz %lu, %lu, %lu\n", field_d.pitch, field_d.xsize, field_d.ysize); // prepare textures struct cudaResourceDesc fieldXResDesc; memset(&fieldXResDesc, 0, sizeof(fieldXResDesc)); fieldXResDesc.resType = cudaResourceTypeArray; fieldXResDesc.res.array.array = field_x_d; struct cudaResourceDesc fieldYResDesc; memset(&fieldYResDesc, 0, sizeof(fieldYResDesc)); fieldYResDesc.resType = cudaResourceTypeArray; fieldYResDesc.res.array.array = field_y_d; struct cudaResourceDesc fieldZResDesc; memset(&fieldZResDesc, 0, sizeof(fieldZResDesc)); fieldZResDesc.resType = cudaResourceTypeArray; fieldZResDesc.res.array.array = field_z_d; struct cudaTextureDesc fieldTexDesc; memset(&fieldTexDesc, 0, sizeof(fieldTexDesc)); fieldTexDesc.addressMode[0] = cudaAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.addressMode[1] = cudaAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.addressMode[2] = cudaAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.filterMode = cudaFilterModePoint; // ie, interpolate linearly fieldTexDesc.readMode = cudaReadModeElementType; fieldTexDesc.normalizedCoords = 0; cudaTextureObject_t fieldXTexture = 0, fieldYTexture = 0, fieldZTexture = 0; cudaCreateTextureObject(&fieldXTexture, &fieldXResDesc, &fieldTexDesc, nullptr); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaCreateTextureObject(&fieldYTexture, &fieldYResDesc, &fieldTexDesc, nullptr); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaCreateTextureObject(&fieldZTexture, &fieldZResDesc, &fieldTexDesc, 
nullptr); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; // alloc cudaMalloc((void**)&start_points_d, n_points * sizeof(coord3d_d)); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaMalloc((void**)&trajectories_d, n_points * max_points_traj * sizeof(coord3d_d)); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaMalloc((void**)&res_d, n_points * sizeof(Tropicity)); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; // copy to device cudaMemcpy(start_points_d, start_points, n_points * sizeof(coord3d_d), cudaMemcpyHostToDevice); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; int block_size = 256; int grid_size = n_points / block_size + (n_points % block_size != 0); // std::cout << "points / gridsize / blocksize: " << n_points << ", " << grid_size << ", " << block_size << std::endl; classify_points_kernel_v2<<<grid_size, block_size>>>(start_points_d, n_points, fieldXTexture, fieldYTexture, fieldZTexture, nx, ny, nz, trajectories_d, step_length, max_points_traj, bfielddir, res_d); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; // copy from device cudaMemcpy(res.data(), res_d, n_points * sizeof(Tropicity), cudaMemcpyDeviceToHost); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaDestroyTextureObject(fieldXTexture); cudaDestroyTextureObject(fieldYTexture); cudaDestroyTextureObject(fieldZTexture); // dealloc cudaFree(start_points_d); cudaFree(trajectories_d); cudaFree(res_d); delete[] start_points; return res; } std::vector<Tropicity> classify_points_cudax_v3(float* field_x_a, float* field_y_a, float* field_z_a, const int64_t nx, const int64_t ny, const int64_t nz, double* origin_a, double* spacing_a, const double* start_points_a, int64_t n_points, Direction bfielddir) { // std::cout << __PRETTY_FUNCTION__ << std::endl; #if 0 float steplength = 0.01; #else float step_length_ratio = 0.05; float step_length = step_length_ratio * spacing_a[0]; #endif 
int64_t max_points_traj = 10000; std::vector<Tropicity> res(n_points); coord3d_d* start_points = new coord3d_d[n_points]; coord3d_d* start_points_d; Tropicity* res_d; coord3d_d* trajectories_d; for (int64_t i = 0; i < n_points; i++) for (int64_t j = 0; j < 3; j++) start_points[i][j] = start_points_a[3 * i + j]; float4* field_float4 = new float4[nx * ny * nz]; for (int64_t i = 0; i < nx * ny * nz; i++) { field_float4[i].x = field_x_a[i]; field_float4[i].y = field_y_a[i]; field_float4[i].z = field_z_a[i]; } cudaArray_t field_d; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>(); // cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 32, 32, 0, cudaChannelFormatKindFloat); cudaExtent field_extent = make_cudaExtent(nx, ny, nz); cudaMalloc3DArray(&field_d, &desc, field_extent); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaMemcpy3DParms memCopyParameters = {0}; memCopyParameters.srcPtr = make_cudaPitchedPtr(field_float4, nx * sizeof(float4), nx, ny); memCopyParameters.dstArray = field_d; memCopyParameters.extent = field_extent; memCopyParameters.kind = cudaMemcpyHostToDevice; // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaMemcpy3DAsync(&memCopyParameters, 0); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; // printf("nx, ny, nz %lu, %lu, %lu\n", field_d.pitch, field_d.xsize, field_d.ysize); // prepare textures struct cudaResourceDesc fieldResDesc; memset(&fieldResDesc, 0, sizeof(fieldResDesc)); fieldResDesc.resType = cudaResourceTypeArray; fieldResDesc.res.array.array = field_d; struct cudaTextureDesc fieldTexDesc; memset(&fieldTexDesc, 0, sizeof(fieldTexDesc)); fieldTexDesc.addressMode[0] = cudaAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.addressMode[1] = cudaAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.addressMode[2] = cudaAddressModeBorder; // alternatively: wrap, clamp, mirror fieldTexDesc.filterMode = cudaFilterModeLinear; // ie, interpolate 
linearly fieldTexDesc.readMode = cudaReadModeElementType; fieldTexDesc.normalizedCoords = 0; cudaTextureObject_t fieldTexture = 0; cudaCreateTextureObject(&fieldTexture, &fieldResDesc, &fieldTexDesc, nullptr); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; // alloc cudaMalloc((void**)&start_points_d, n_points * sizeof(coord3d_d)); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaMalloc((void**)&trajectories_d, n_points * max_points_traj * sizeof(coord3d_d)); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaMalloc((void**)&res_d, n_points * sizeof(Tropicity)); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; // copy to device cudaMemcpy(start_points_d, start_points, n_points * sizeof(coord3d_d), cudaMemcpyHostToDevice); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; int block_size = 256; int grid_size = n_points / block_size + (n_points % block_size != 0); // std::cout << "points / gridsize / blocksize: " << n_points << ", " << grid_size << ", " << block_size << std::endl; classify_points_kernel_v3<<<grid_size, block_size>>>(start_points_d, n_points, fieldTexture, nx, ny, nz, trajectories_d, step_length, max_points_traj, bfielddir, res_d); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; // copy from device cudaMemcpy(res.data(), res_d, n_points * sizeof(Tropicity), cudaMemcpyDeviceToHost); // cout << "e " << cudaGetErrorName(cudaGetLastError()) << endl; cudaDestroyTextureObject(fieldTexture); // dealloc cudaFree(start_points_d); cudaFree(trajectories_d); cudaFree(res_d); delete[] start_points; return res; }
2132a9cb453dcc5745136fa514f10eacaf066987.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow[i]); }
2132a9cb453dcc5745136fa514f10eacaf066987.cu
#include "includes.h" __global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow[i]); }
d7f1122ab8a7f33a2ffab18f2c43c06db9453ffd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#define CUDA_DEBUG #include <stdio.h> typedef unsigned int uint32_t; #include "mainkernel.h" #include "mpz.h" // multiple precision cuda code #include "cuda_string.h" //copied constants from prime.h static const unsigned int nFractionalBits = 24; static const unsigned int TARGET_FRACTIONAL_MASK = (1u<<nFractionalBits) - 1; static const unsigned int TARGET_LENGTH_MASK = ~TARGET_FRACTIONAL_MASK; //ignore intger conversion warnings here //these are reciprocals for a quick integer division, see http://stackoverflow.com/questions/980702/efficient-cycles-wise-algorithm-to-compute-modulo-25 and http://www.hackersdelight.org/magic.htm __device__ const unsigned int magicNumbers[] = {2147483648,2863311531,3435973837,613566757,3123612579,1321528399,4042322161,2938661835,2987803337,2369637129,138547333,3134165325,3352169597,799063683,2924233053,891408307,582368447,1126548799,128207979,3871519817,3235934265,3479467177,827945503,3088515809,1372618415,1148159575,1042467791,842937507,748664025,570128403,33818641,4196609267,125400505,1977538899,3689636335,910191745,875407347,3372735055,1645975491,2060591247,1847555765,3037324939,2878302691,356059465,1286310003,1381296015,2605477791,635578121,548696263,1200340205,423966729,2300233531,285143057,2190262207,4278255361,2090326289,4087403821,4057238479,3969356057,122276649,3885200099,3752599413,3581471101,883851791,2730666109,867122735,2348607495,815661445,1584310703,3150463117,1934560341,1830445673,1497972245,1600537411,1507204883,717696885,2826508041,692387675,2741924259,2688292489,953298231,928365853,807174829,158705489,313072787,668967819,2448800953,601483385,2385057761,37105549,2354414621,295895451,1128862041,183695139,550857529,2185907809,2160140723,4220774003,131394793,1016184499,1005038051,123374285,3905902763,966178935,1925589541,3811132159,3746206569,1854151143,3671157355,3022913755,1811386537,3587313631,3564057141,3552541609,871245347,6700
417,3419942855,849699867,1683785035,3336909341,2358658289,1633746847,1624093985,12576771,795594521,1978993491,1550792141,3058446809,1754615251,3000031727,1487837115,2959654449,1561286381,726229609,1444824741,2859588109,1422395379,2794184569,1223284287,1141431359,42367125,669617313,1048953131,2659036585,2652621539,947042849,644496851,2565954791,824993719,801262729,626859537,2496053639,2490400063,1239584699,151531371,2413856483,490719877,439205483,398785651,1168450189,2322094251,18027145,2274067483,234431789,206615623,179139023,143021111,116313057,63849861,2170802819,21074423,12619885,2132903255,2128773723,4232961031,2096304343,4184630363,2072594963,2068695443,1028542215,4046040949,2015603351,2011915147,4009158169,498418689,991444209,3937373779,1958168527,3895523925,3821065605,953609391,3781639305,469475503,3724002127,1852589095,3686543597,114437097,226609981,3613842655,449514157,1789278483,3572742901,1777706755,3521254213,3493285553,2593124343,2582353285,856984901,2528999363,1703348765,3390937943,3380512307,3375323493,2435019715,3334379463,3329331197,3314277703,403935205,2139628917,3203238537,3184682485,3143707299,3121395679,1886405173,3082022783,3077709245,1843269287,764080353,3039423989,3031045149,3026873029,3014425299,1494917237,1644325765,2965641613,1620360897,1612415527,1596588647,2933986999,2910685977,1480536987,1450357997,2850321783,1383595017,1415984067,1410534481,350833321,1304073457,2785336613,2778298491,1212918129,686765539,1178643795,1366701837,340828155,1358260195,1356584365,2703163191,1343325141,2654222397,994324961,2638300247,2635138713,900598577,888352105,882250493,2573461973,816068743,2552551661,2537822569,2526161121,629371281,1254434259,705654093,77343249,638343429,1230567015,621797949,2442002505,562069161,530097445,2401991541,1190591909,431573823,2355675689,146914969,401291659,391283649,2340631459,361513923,332120039,1153132279,2299031109,284700059,255520559,218164065,213537585,1114558163,2222358015,131853551,1103373435,1101163373,105279339,9649202
7,79022273,66010901,44496455,2167593155,18957679,2142253537,4263738741,4251374105,4226858733,4222800299,4214706767,526333953,4190611255,1041697421,130088929,2065780419,2063841629,1029023517,1027101007,4104569773,510688169,4070380853,1009189195,1996389701,3985542829,3974737019,3625872887,3932093439,3928581073,3921575133,3907637949,1940029339,1938319309,483727069,3856244201,3846127251,1918031623,3363798941,952370401,3317402261,1885146383,117519413,1878704191,3747802737,3187885127,932975501,3709866311,3700501903,28861603,3691184651,460239275,3056552155,114580203,3648317305,3639260663,3630248875,2923820577,3603479321,3594643655,3577101677,2836036371,1778425601,3551107397,3514220145,1744564265,3475342957,2633818071,3458943383,862700375,1724048025,3440005093,2526353389,1697432077,1696122835,842860581,2427304789,3356006495,3340711365,3323042321,3310535575,2321123733,1651538307,2291399625,2276637503,3278454351,3273573883,1635569547,3266280365,3259019275,812346973,2194219737,2189435965,3235047085,3223192753,201302019,2123196165,399967853,2095220153,198682983,3167480383,2012736557,3151591911,393103907,785085061,1981231793,1945609521,1914770101,775122755,3093947599,385658235,1862605699,1853996719,1815552345,1798619835,3036276501,1765034787,3023751469,1735950101,1502578241,748221591,2978697265,1654368533,2968644287,1481322503,1626333965,2933008677,1465526995,2921319503,2913578345,1524478965,2896309853,723125043,1442455399,1452234941,1433989733,2856801891,2853095369,2847553585,2829235453,2820164483,2818357265,2803982475,2780933615,1388710613,2775668357,1235427563,1225015775,1218096335,2746204503,2741069811,2734253349,1166748335,1362045993,2705657651,1113021037,1350336663,2699015963,2689114345,2666290701,2664675257,332480081,1015079805,1005480443,999100127,989558401,986385465,967427573,1314026445,942361089,327138241,923717381,2607795145,2595483335,162122033,2581770773,2577232061,2562217601,2550331407,1272214785,2541488883,785074877,2537090575,776288411,744329767,2513887689,250529
5649,156313851,692893789,1246258575,684423037,310684269,673173355,1239934173,650825801,648046485,631435965}; __device__ const unsigned int magicShifts[] = {0,1,2,3,3,2,4,5,4,4,5,6,5,3,5,6,3,4,1,6,7,6,4,6,7,7,7,7,7,7,7,7,2,6,7,5,5,7,6,8,8,7,7,4,8,6,7,8,8,6,8,7,4,7,8,7,8,8,8,3,8,8,8,6,9,6,9,6,7,8,9,9,7,9,9,6,8,6,8,8,9,9,9,4,5,9,8,6,8,2,8,9,7,9,6,8,8,9,4,7,7,4,9,7,8,9,9,8,9,10,8,9,9,9,7,0,9,7,8,9,10,8,8,1,7,10,8,9,10,9,8,9,10,7,8,9,8,9,10,10,3,7,10,9,9,10,7,9,10,10,7,9,9,8,5,9,10,10,10,8,9,2,9,10,10,10,10,10,10,9,10,10,9,9,10,9,10,9,9,8,10,9,9,10,7,8,10,9,10,10,8,10,7,10,9,10,5,6,10,7,9,10,9,10,10,11,11,8,11,9,10,10,10,11,10,10,10,7,11,10,10,10,10,11,10,10,11,8,10,10,10,10,9,11,10,11,11,11,10,10,11,11,10,11,9,9,7,11,10,10,11,8,11,9,7,9,9,10,9,10,11,10,10,11,11,11,10,11,10,10,10,8,9,11,5,11,9,11,10,11,11,10,9,11,10,6,11,11,10,11,11,9,10,7,11,11,11,9,10,11,9,9,11,11,11,11,11,10,11,10,11,11,11,11,11,8,11,9,6,10,10,9,9,11,8,11,9,10,11,11,12,11,11,11,11,10,10,8,11,11,10,12,9,12,10,6,10,11,12,9,11,11,4,11,8,12,6,11,11,11,12,11,11,11,12,10,11,11,10,11,12,11,9,10,11,12,10,10,9,12,11,11,11,11,12,10,12,12,11,11,10,11,11,9,12,12,11,11,7,12,8,12,7,11,12,11,8,9,12,12,12,9,11,8,12,12,12,12,11,12,11,12,10,9,11,12,11,10,12,11,10,11,11,12,11,9,10,12,10,11,11,11,11,11,11,11,11,10,11,12,12,12,11,11,11,12,10,11,12,10,11,11,11,11,8,12,12,12,12,12,12,10,12,8,12,11,11,7,11,11,11,11,10,11,12,11,12,12,11,11,7,12,10,12,8,12,10,12,12,12}; __device__ const unsigned int magicAdds[] = 
{0,0,0,1,0,0,0,1,0,0,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,1,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,1,0,0,0,0,0,0,0,1,0,0,1,1,1,0,1,0,0,0,0,0,1,0,1,0,1,0,1,1,0,0,1,0,0,1,1,0,1,1,0,0,0,1,1,1,0,0,1,0,0,1,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,0,1,0,0,0,1,1,1,0,0,0,1,1,1,1,0,1,0,1,0,0,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,1,0,0,1,0,0,0,0,0,0,1,1,1,1,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,1,0,1,0,1,0,1,1,1}; __device__ const unsigned int primes_len = 1229; __device__ const unsigned int primes[] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 
1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 
3701, 3709, 3719, 3727, 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907, 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, 4001, 4003, 4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057, 4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, 4153, 4157, 4159, 4177, 4201, 4211, 4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, 4327, 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, 4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481, 4483, 4493, 4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, 4591, 4597, 4603, 4621, 4637, 4639, 4643, 4649, 4651, 4657, 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, 4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933, 4937, 4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, 5009, 5011, 5021, 5023, 5039, 5051, 5059, 5077, 5081, 5087, 5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, 5189, 5197, 5209, 5227, 5231, 5233, 5237, 5261, 5273, 5279, 5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387, 5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443, 5449, 5471, 5477, 5479, 5483, 5501, 5503, 5507, 5519, 5521, 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, 5641, 5647, 5651, 5653, 5657, 5659, 5669, 5683, 5689, 5693, 5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, 5801, 5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923, 5927, 5939, 5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073, 6079, 6089, 6091, 6101, 6113, 6121, 6131, 6133, 6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, 6229, 6247, 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361, 6367, 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, 6481, 6491, 6521, 6529, 6547, 6551, 6553, 6563, 6569, 
6571, 6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, 6679, 6689, 6691, 6701, 6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833, 6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, 6947, 6949, 6959, 6961, 6967, 6971, 6977, 6983, 6991, 6997, 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, 7109, 7121, 7127, 7129, 7151, 7159, 7177, 7187, 7193, 7207, 7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, 7307, 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487, 7489, 7499, 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, 7573, 7577, 7583, 7589, 7591, 7603, 7607, 7621, 7639, 7643, 7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, 7727, 7741, 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907, 7919, 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087, 8089, 8093, 8101, 8111, 8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, 8221, 8231, 8233, 8237, 8243, 8263, 8269, 8273, 8287, 8291, 8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387, 8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, 8513, 8521, 8527, 8537, 8539, 8543, 8563, 8573, 8581, 8597, 8599, 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, 8681, 8689, 8693, 8699, 8707, 8713, 8719, 8731, 8737, 8741, 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, 8837, 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, 8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001, 9007, 9011, 9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, 9127, 9133, 9137, 9151, 9157, 9161, 9173, 9181, 9187, 9199, 9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, 9293, 9311, 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, 9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437, 9439, 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, 9539, 9547, 
9551, 9587, 9601, 9613, 9619, 9623, 9629, 9631, 9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, 9739, 9743, 9749, 9767, 9769, 9781, 9787, 9791, 9803, 9811, 9817, 9829, 9833, 9839, 9851, 9857, 9859, 9871, 9883, 9887, 9901, 9907, 9923, 9929, 9931, 9941, 9949, 9967, 9973}; __device__ __inline__ void mpz_2powmod(mpz_cuda_t *result, mpz_cuda_t *mpzExp, mpz_cuda_t *mod, mpz_cuda_t *base, // temps mpz_cuda_t *tmp1, mpz_cuda_t *tmp2, mpz_cuda_t *tmp3) { unsigned int iteration; mpz_cuda_t *b = tmp3; // result = 1 mpz_set_ui(result, 1); // _base = base % mod mpz_set_ui(base,2); mpz_set(tmp1, base); mpz_div(tmp2, b, tmp1, mod); iteration = 0; while (!bits_is_zero(mpzExp->digits, mpzExp->capacity, iteration)) { // if (binary_exp is odd) if (digits_bit_at(mpzExp->digits, iteration) == 1) { // result = (result * base) % mod mpz_mult(tmp1, result, b); mpz_div(tmp2, result, tmp1, mod); } // binary_exp = binary_exp >> 1 iteration++; // base = (base * base) % mod mpz_set(tmp1, b); mpz_mult(tmp2, b, tmp1); mpz_div(tmp1, b, tmp2, mod); } } __device__ unsigned int fastMod(unsigned int N, unsigned int p, unsigned int p_i, unsigned int index) { unsigned int magic = magicNumbers[p_i]; unsigned int shift = magicShifts[p_i]; unsigned int div = __umulhi(N,magic); unsigned int add = magicAdds[p_i]*N; //this can overflow?: div += add; we could also do add with carry in asm: /* unsigned int carry = 0; unsigned int temp = 0; asm volatile ("add.cc.u32 %0, %2, %3;" "addc.u32 %1, 0, 0;" : "=r"(temp), "=r"(carry) : "r"(div), "r"(add)); carry <<= 32-shift; div = temp;*/ div += add; div >>= shift; //div |= carry; #ifdef MOD_DEBUG if(index==0 && N/p != div) printf("fast mod not working, N:%i, Divisor %i, magic %i, shift %i, should be %i is %i\n",N,p,magic,shift,N/p,div); #endif //else if (index==0 && N/p == div) // printf("fast mod working, N:%i, Divisor %i, magic %i, shift %i, carry %i, should be %i is %i\n",N,p,magic,shift,carry,N/p,div); return N - div*p; } __device__ __host__ unsigned 
int fastMpzPrimeMod(mpz_cuda_t *N, unsigned int p_i, unsigned int index) { unsigned int p = primes[p_i]; //bool found_composite = false; unsigned int mod = 0; for (int i=N->capacity-1; i >= 0; i--) { unsigned int digit = N->digits[i]; unsigned int hi = digit >> 16; unsigned int low = digit & 0x0000ffff; //mod = fastMod((mod * 0x10000 +hi),p,p_i,index); //mod = fastMod((mod * 0x10000 +low),p,p_i,index); mod = (mod * 0x10000 +hi) % p; mod = (mod * 0x10000 +low) % p; } #ifdef CUDA_DEBUG if(index==1) { printf("N: "); mpz_print(N); printf(" mod %i = %i ",p,mod); } #endif return mod; } #define MAXCHAIN 13 __device__ __host__ int fastModPrimeChecks(mpz_cuda_t *N,unsigned int index, bool sophieGermain) { #ifdef CUDA_DEBUG if(index == 1) { printf("testing N:"); mpz_print(N); printf("\n"); } #endif int factors = MAXCHAIN; for (int i=0; i < 1000; i++) { int p = primes[i]; int mod = fastMpzPrimeMod(N, i, index); //unsigned int mod = N % p if(mod==0) factors = 0; for(int j=1; j <= MAXCHAIN; j++) { mod = mod*2; if(sophieGermain) { mod += 1; } else { mod -= 1; } mod = mod % p; if(mod==0) { //factors = min(j,factors); if(j < factors) { #ifdef CUDA_DEBUG if(index==1) { printf("Found better divisor:%i in chain %i previous %i\n",p,j,factors); printf("For N:"); mpz_print(N); printf("\n"); } #endif factors = j; } } } } #ifdef CUDA_DEBUG if(index==1) { printf("factors is %i \n",factors); } #endif return factors; } __global__ void runPrimeCandidateSearch(cudaCandidate *candidates, char *result, unsigned int num_candidates) { unsigned int threadIndex = threadIdx.x; //even do cunningFirstBound check, odd do cunningSecondBound check unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ mpz_cuda_t mpzN[48]; #ifdef DO_FERMAT_TEST __shared__ mpz_cuda_t mpzExp[48]; __shared__ mpz_cuda_t mpzTmp1[48]; __shared__ mpz_cuda_t mpzTmp2[48]; __shared__ mpz_cuda_t mpzTmp3[48]; __shared__ mpz_cuda_t mpzResult[48]; __shared__ mpz_cuda_t mpzBase[48]; #endif //check bounds if (index < 
2*num_candidates) { mpz_init(mpzN+threadIndex); #ifdef DO_FERMAT_TEST mpz_init(mpzExp+threadIndex); mpz_init(mpzTmp1+threadIndex); mpz_init(mpzTmp2+threadIndex); mpz_init(mpzTmp3+threadIndex); mpz_init(mpzResult+threadIndex); mpz_init(mpzBase+threadIndex); #endif mpzN[threadIndex] = candidates[index/2].chainOrigin; #ifdef CUDA_DEBUG if(index == 1) { printf("[1] chain Origin:"); mpz_print(mpzN+threadIndex); printf("\n"); } #endif bool sophieGermain; if(index % 2 == 0) { //sloppy add mpzN[threadIndex].digits[0] -= 1; //mpz_addeq_i(&mpzN[threadIndex],-1); sophieGermain = true; } else { sophieGermain = false; mpzN[threadIndex].digits[0] += 1; //mpz_addeq_i(&mpzN[threadIndex],1); } #ifdef DO_FERMAT_TEST mpzExp[threadIndex] = mpzN[threadIndex]; mpzExp[threadIndex].digits[0] -= 1; mpz_2powmod(&mpzResult[threadIndex], &mpzExp[threadIndex], &mpzN[threadIndex] , &mpzBase[threadIndex], &mpzTmp1[threadIndex], &mpzTmp2[threadIndex], &mpzTmp3[threadIndex]); unsigned int myresult = 0; if(mpzResult[threadIndex].digits[0] == 1); myresult = 1; #else int myresult = fastModPrimeChecks(&mpzN[threadIndex],index,sophieGermain); #endif result[index] = myresult; } } void runCandidateSearchKernel(cudaCandidate *candidates, char *result, unsigned int num_candidates) { //TODO: make gridsize dynamic hipLaunchKernelGGL(( runPrimeCandidateSearch), dim3(400) , dim3(48), 0, 0, candidates, result, num_candidates); }
d7f1122ab8a7f33a2ffab18f2c43c06db9453ffd.cu
//#define CUDA_DEBUG #include <stdio.h> typedef unsigned int uint32_t; #include "mainkernel.h" #include "mpz.h" // multiple precision cuda code #include "cuda_string.h" //copied constants from prime.h static const unsigned int nFractionalBits = 24; static const unsigned int TARGET_FRACTIONAL_MASK = (1u<<nFractionalBits) - 1; static const unsigned int TARGET_LENGTH_MASK = ~TARGET_FRACTIONAL_MASK; //ignore intger conversion warnings here //these are reciprocals for a quick integer division, see http://stackoverflow.com/questions/980702/efficient-cycles-wise-algorithm-to-compute-modulo-25 and http://www.hackersdelight.org/magic.htm __device__ const unsigned int magicNumbers[] = {2147483648,2863311531,3435973837,613566757,3123612579,1321528399,4042322161,2938661835,2987803337,2369637129,138547333,3134165325,3352169597,799063683,2924233053,891408307,582368447,1126548799,128207979,3871519817,3235934265,3479467177,827945503,3088515809,1372618415,1148159575,1042467791,842937507,748664025,570128403,33818641,4196609267,125400505,1977538899,3689636335,910191745,875407347,3372735055,1645975491,2060591247,1847555765,3037324939,2878302691,356059465,1286310003,1381296015,2605477791,635578121,548696263,1200340205,423966729,2300233531,285143057,2190262207,4278255361,2090326289,4087403821,4057238479,3969356057,122276649,3885200099,3752599413,3581471101,883851791,2730666109,867122735,2348607495,815661445,1584310703,3150463117,1934560341,1830445673,1497972245,1600537411,1507204883,717696885,2826508041,692387675,2741924259,2688292489,953298231,928365853,807174829,158705489,313072787,668967819,2448800953,601483385,2385057761,37105549,2354414621,295895451,1128862041,183695139,550857529,2185907809,2160140723,4220774003,131394793,1016184499,1005038051,123374285,3905902763,966178935,1925589541,3811132159,3746206569,1854151143,3671157355,3022913755,1811386537,3587313631,3564057141,3552541609,871245347,6700417,3419942855,849699867,1683785035,3336909341,2358658289,1633746847,1624093985,12576771
,795594521,1978993491,1550792141,3058446809,1754615251,3000031727,1487837115,2959654449,1561286381,726229609,1444824741,2859588109,1422395379,2794184569,1223284287,1141431359,42367125,669617313,1048953131,2659036585,2652621539,947042849,644496851,2565954791,824993719,801262729,626859537,2496053639,2490400063,1239584699,151531371,2413856483,490719877,439205483,398785651,1168450189,2322094251,18027145,2274067483,234431789,206615623,179139023,143021111,116313057,63849861,2170802819,21074423,12619885,2132903255,2128773723,4232961031,2096304343,4184630363,2072594963,2068695443,1028542215,4046040949,2015603351,2011915147,4009158169,498418689,991444209,3937373779,1958168527,3895523925,3821065605,953609391,3781639305,469475503,3724002127,1852589095,3686543597,114437097,226609981,3613842655,449514157,1789278483,3572742901,1777706755,3521254213,3493285553,2593124343,2582353285,856984901,2528999363,1703348765,3390937943,3380512307,3375323493,2435019715,3334379463,3329331197,3314277703,403935205,2139628917,3203238537,3184682485,3143707299,3121395679,1886405173,3082022783,3077709245,1843269287,764080353,3039423989,3031045149,3026873029,3014425299,1494917237,1644325765,2965641613,1620360897,1612415527,1596588647,2933986999,2910685977,1480536987,1450357997,2850321783,1383595017,1415984067,1410534481,350833321,1304073457,2785336613,2778298491,1212918129,686765539,1178643795,1366701837,340828155,1358260195,1356584365,2703163191,1343325141,2654222397,994324961,2638300247,2635138713,900598577,888352105,882250493,2573461973,816068743,2552551661,2537822569,2526161121,629371281,1254434259,705654093,77343249,638343429,1230567015,621797949,2442002505,562069161,530097445,2401991541,1190591909,431573823,2355675689,146914969,401291659,391283649,2340631459,361513923,332120039,1153132279,2299031109,284700059,255520559,218164065,213537585,1114558163,2222358015,131853551,1103373435,1101163373,105279339,96492027,79022273,66010901,44496455,2167593155,18957679,2142253537,4263738741,4251374105,422685
8733,4222800299,4214706767,526333953,4190611255,1041697421,130088929,2065780419,2063841629,1029023517,1027101007,4104569773,510688169,4070380853,1009189195,1996389701,3985542829,3974737019,3625872887,3932093439,3928581073,3921575133,3907637949,1940029339,1938319309,483727069,3856244201,3846127251,1918031623,3363798941,952370401,3317402261,1885146383,117519413,1878704191,3747802737,3187885127,932975501,3709866311,3700501903,28861603,3691184651,460239275,3056552155,114580203,3648317305,3639260663,3630248875,2923820577,3603479321,3594643655,3577101677,2836036371,1778425601,3551107397,3514220145,1744564265,3475342957,2633818071,3458943383,862700375,1724048025,3440005093,2526353389,1697432077,1696122835,842860581,2427304789,3356006495,3340711365,3323042321,3310535575,2321123733,1651538307,2291399625,2276637503,3278454351,3273573883,1635569547,3266280365,3259019275,812346973,2194219737,2189435965,3235047085,3223192753,201302019,2123196165,399967853,2095220153,198682983,3167480383,2012736557,3151591911,393103907,785085061,1981231793,1945609521,1914770101,775122755,3093947599,385658235,1862605699,1853996719,1815552345,1798619835,3036276501,1765034787,3023751469,1735950101,1502578241,748221591,2978697265,1654368533,2968644287,1481322503,1626333965,2933008677,1465526995,2921319503,2913578345,1524478965,2896309853,723125043,1442455399,1452234941,1433989733,2856801891,2853095369,2847553585,2829235453,2820164483,2818357265,2803982475,2780933615,1388710613,2775668357,1235427563,1225015775,1218096335,2746204503,2741069811,2734253349,1166748335,1362045993,2705657651,1113021037,1350336663,2699015963,2689114345,2666290701,2664675257,332480081,1015079805,1005480443,999100127,989558401,986385465,967427573,1314026445,942361089,327138241,923717381,2607795145,2595483335,162122033,2581770773,2577232061,2562217601,2550331407,1272214785,2541488883,785074877,2537090575,776288411,744329767,2513887689,2505295649,156313851,692893789,1246258575,684423037,310684269,673173355,1239934173,650825801,6
48046485,631435965}; __device__ const unsigned int magicShifts[] = {0,1,2,3,3,2,4,5,4,4,5,6,5,3,5,6,3,4,1,6,7,6,4,6,7,7,7,7,7,7,7,7,2,6,7,5,5,7,6,8,8,7,7,4,8,6,7,8,8,6,8,7,4,7,8,7,8,8,8,3,8,8,8,6,9,6,9,6,7,8,9,9,7,9,9,6,8,6,8,8,9,9,9,4,5,9,8,6,8,2,8,9,7,9,6,8,8,9,4,7,7,4,9,7,8,9,9,8,9,10,8,9,9,9,7,0,9,7,8,9,10,8,8,1,7,10,8,9,10,9,8,9,10,7,8,9,8,9,10,10,3,7,10,9,9,10,7,9,10,10,7,9,9,8,5,9,10,10,10,8,9,2,9,10,10,10,10,10,10,9,10,10,9,9,10,9,10,9,9,8,10,9,9,10,7,8,10,9,10,10,8,10,7,10,9,10,5,6,10,7,9,10,9,10,10,11,11,8,11,9,10,10,10,11,10,10,10,7,11,10,10,10,10,11,10,10,11,8,10,10,10,10,9,11,10,11,11,11,10,10,11,11,10,11,9,9,7,11,10,10,11,8,11,9,7,9,9,10,9,10,11,10,10,11,11,11,10,11,10,10,10,8,9,11,5,11,9,11,10,11,11,10,9,11,10,6,11,11,10,11,11,9,10,7,11,11,11,9,10,11,9,9,11,11,11,11,11,10,11,10,11,11,11,11,11,8,11,9,6,10,10,9,9,11,8,11,9,10,11,11,12,11,11,11,11,10,10,8,11,11,10,12,9,12,10,6,10,11,12,9,11,11,4,11,8,12,6,11,11,11,12,11,11,11,12,10,11,11,10,11,12,11,9,10,11,12,10,10,9,12,11,11,11,11,12,10,12,12,11,11,10,11,11,9,12,12,11,11,7,12,8,12,7,11,12,11,8,9,12,12,12,9,11,8,12,12,12,12,11,12,11,12,10,9,11,12,11,10,12,11,10,11,11,12,11,9,10,12,10,11,11,11,11,11,11,11,11,10,11,12,12,12,11,11,11,12,10,11,12,10,11,11,11,11,8,12,12,12,12,12,12,10,12,8,12,11,11,7,11,11,11,11,10,11,12,11,12,12,11,11,7,12,10,12,8,12,10,12,12,12}; __device__ const unsigned int magicAdds[] = 
{0,0,0,1,0,0,0,1,0,0,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,1,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,1,0,0,0,0,0,0,0,1,0,0,1,1,1,0,1,0,0,0,0,0,1,0,1,0,1,0,1,1,0,0,1,0,0,1,1,0,1,1,0,0,0,1,1,1,0,0,1,0,0,1,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,0,1,0,0,0,1,1,1,0,0,0,1,1,1,1,0,1,0,1,0,0,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,1,0,0,1,0,0,0,0,0,0,1,1,1,1,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,1,0,1,0,1,0,1,1,1}; __device__ const unsigned int primes_len = 1229; __device__ const unsigned int primes[] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 
1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 
3701, 3709, 3719, 3727, 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907, 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, 4001, 4003, 4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057, 4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, 4153, 4157, 4159, 4177, 4201, 4211, 4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, 4327, 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, 4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481, 4483, 4493, 4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, 4591, 4597, 4603, 4621, 4637, 4639, 4643, 4649, 4651, 4657, 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, 4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933, 4937, 4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, 5009, 5011, 5021, 5023, 5039, 5051, 5059, 5077, 5081, 5087, 5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, 5189, 5197, 5209, 5227, 5231, 5233, 5237, 5261, 5273, 5279, 5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387, 5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443, 5449, 5471, 5477, 5479, 5483, 5501, 5503, 5507, 5519, 5521, 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, 5641, 5647, 5651, 5653, 5657, 5659, 5669, 5683, 5689, 5693, 5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, 5801, 5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923, 5927, 5939, 5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073, 6079, 6089, 6091, 6101, 6113, 6121, 6131, 6133, 6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, 6229, 6247, 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361, 6367, 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, 6481, 6491, 6521, 6529, 6547, 6551, 6553, 6563, 6569, 
6571, 6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, 6679, 6689, 6691, 6701, 6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833, 6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, 6947, 6949, 6959, 6961, 6967, 6971, 6977, 6983, 6991, 6997, 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, 7109, 7121, 7127, 7129, 7151, 7159, 7177, 7187, 7193, 7207, 7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, 7307, 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487, 7489, 7499, 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, 7573, 7577, 7583, 7589, 7591, 7603, 7607, 7621, 7639, 7643, 7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, 7727, 7741, 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907, 7919, 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087, 8089, 8093, 8101, 8111, 8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, 8221, 8231, 8233, 8237, 8243, 8263, 8269, 8273, 8287, 8291, 8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387, 8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, 8513, 8521, 8527, 8537, 8539, 8543, 8563, 8573, 8581, 8597, 8599, 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, 8681, 8689, 8693, 8699, 8707, 8713, 8719, 8731, 8737, 8741, 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, 8837, 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, 8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001, 9007, 9011, 9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, 9127, 9133, 9137, 9151, 9157, 9161, 9173, 9181, 9187, 9199, 9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, 9293, 9311, 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, 9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437, 9439, 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, 9539, 9547, 
9551, 9587, 9601, 9613, 9619, 9623, 9629, 9631, 9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, 9739, 9743, 9749, 9767, 9769, 9781, 9787, 9791, 9803, 9811, 9817, 9829, 9833, 9839, 9851, 9857, 9859, 9871, 9883, 9887, 9901, 9907, 9923, 9929, 9931, 9941, 9949, 9967, 9973}; __device__ __inline__ void mpz_2powmod(mpz_cuda_t *result, mpz_cuda_t *mpzExp, mpz_cuda_t *mod, mpz_cuda_t *base, // temps mpz_cuda_t *tmp1, mpz_cuda_t *tmp2, mpz_cuda_t *tmp3) { unsigned int iteration; mpz_cuda_t *b = tmp3; // result = 1 mpz_set_ui(result, 1); // _base = base % mod mpz_set_ui(base,2); mpz_set(tmp1, base); mpz_div(tmp2, b, tmp1, mod); iteration = 0; while (!bits_is_zero(mpzExp->digits, mpzExp->capacity, iteration)) { // if (binary_exp is odd) if (digits_bit_at(mpzExp->digits, iteration) == 1) { // result = (result * base) % mod mpz_mult(tmp1, result, b); mpz_div(tmp2, result, tmp1, mod); } // binary_exp = binary_exp >> 1 iteration++; // base = (base * base) % mod mpz_set(tmp1, b); mpz_mult(tmp2, b, tmp1); mpz_div(tmp1, b, tmp2, mod); } } __device__ unsigned int fastMod(unsigned int N, unsigned int p, unsigned int p_i, unsigned int index) { unsigned int magic = magicNumbers[p_i]; unsigned int shift = magicShifts[p_i]; unsigned int div = __umulhi(N,magic); unsigned int add = magicAdds[p_i]*N; //this can overflow?: div += add; we could also do add with carry in asm: /* unsigned int carry = 0; unsigned int temp = 0; asm volatile ("add.cc.u32 %0, %2, %3;" "addc.u32 %1, 0, 0;" : "=r"(temp), "=r"(carry) : "r"(div), "r"(add)); carry <<= 32-shift; div = temp;*/ div += add; div >>= shift; //div |= carry; #ifdef MOD_DEBUG if(index==0 && N/p != div) printf("fast mod not working, N:%i, Divisor %i, magic %i, shift %i, should be %i is %i\n",N,p,magic,shift,N/p,div); #endif //else if (index==0 && N/p == div) // printf("fast mod working, N:%i, Divisor %i, magic %i, shift %i, carry %i, should be %i is %i\n",N,p,magic,shift,carry,N/p,div); return N - div*p; } __device__ __host__ unsigned 
int fastMpzPrimeMod(mpz_cuda_t *N, unsigned int p_i, unsigned int index) { unsigned int p = primes[p_i]; //bool found_composite = false; unsigned int mod = 0; for (int i=N->capacity-1; i >= 0; i--) { unsigned int digit = N->digits[i]; unsigned int hi = digit >> 16; unsigned int low = digit & 0x0000ffff; //mod = fastMod((mod * 0x10000 +hi),p,p_i,index); //mod = fastMod((mod * 0x10000 +low),p,p_i,index); mod = (mod * 0x10000 +hi) % p; mod = (mod * 0x10000 +low) % p; } #ifdef CUDA_DEBUG if(index==1) { printf("N: "); mpz_print(N); printf(" mod %i = %i ",p,mod); } #endif return mod; } #define MAXCHAIN 13 __device__ __host__ int fastModPrimeChecks(mpz_cuda_t *N,unsigned int index, bool sophieGermain) { #ifdef CUDA_DEBUG if(index == 1) { printf("testing N:"); mpz_print(N); printf("\n"); } #endif int factors = MAXCHAIN; for (int i=0; i < 1000; i++) { int p = primes[i]; int mod = fastMpzPrimeMod(N, i, index); //unsigned int mod = N % p if(mod==0) factors = 0; for(int j=1; j <= MAXCHAIN; j++) { mod = mod*2; if(sophieGermain) { mod += 1; } else { mod -= 1; } mod = mod % p; if(mod==0) { //factors = min(j,factors); if(j < factors) { #ifdef CUDA_DEBUG if(index==1) { printf("Found better divisor:%i in chain %i previous %i\n",p,j,factors); printf("For N:"); mpz_print(N); printf("\n"); } #endif factors = j; } } } } #ifdef CUDA_DEBUG if(index==1) { printf("factors is %i \n",factors); } #endif return factors; } __global__ void runPrimeCandidateSearch(cudaCandidate *candidates, char *result, unsigned int num_candidates) { unsigned int threadIndex = threadIdx.x; //even do cunningFirstBound check, odd do cunningSecondBound check unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ mpz_cuda_t mpzN[48]; #ifdef DO_FERMAT_TEST __shared__ mpz_cuda_t mpzExp[48]; __shared__ mpz_cuda_t mpzTmp1[48]; __shared__ mpz_cuda_t mpzTmp2[48]; __shared__ mpz_cuda_t mpzTmp3[48]; __shared__ mpz_cuda_t mpzResult[48]; __shared__ mpz_cuda_t mpzBase[48]; #endif //check bounds if (index < 
2*num_candidates) { mpz_init(mpzN+threadIndex); #ifdef DO_FERMAT_TEST mpz_init(mpzExp+threadIndex); mpz_init(mpzTmp1+threadIndex); mpz_init(mpzTmp2+threadIndex); mpz_init(mpzTmp3+threadIndex); mpz_init(mpzResult+threadIndex); mpz_init(mpzBase+threadIndex); #endif mpzN[threadIndex] = candidates[index/2].chainOrigin; #ifdef CUDA_DEBUG if(index == 1) { printf("[1] chain Origin:"); mpz_print(mpzN+threadIndex); printf("\n"); } #endif bool sophieGermain; if(index % 2 == 0) { //sloppy add mpzN[threadIndex].digits[0] -= 1; //mpz_addeq_i(&mpzN[threadIndex],-1); sophieGermain = true; } else { sophieGermain = false; mpzN[threadIndex].digits[0] += 1; //mpz_addeq_i(&mpzN[threadIndex],1); } #ifdef DO_FERMAT_TEST mpzExp[threadIndex] = mpzN[threadIndex]; mpzExp[threadIndex].digits[0] -= 1; mpz_2powmod(&mpzResult[threadIndex], &mpzExp[threadIndex], &mpzN[threadIndex] , &mpzBase[threadIndex], &mpzTmp1[threadIndex], &mpzTmp2[threadIndex], &mpzTmp3[threadIndex]); unsigned int myresult = 0; if(mpzResult[threadIndex].digits[0] == 1); myresult = 1; #else int myresult = fastModPrimeChecks(&mpzN[threadIndex],index,sophieGermain); #endif result[index] = myresult; } } void runCandidateSearchKernel(cudaCandidate *candidates, char *result, unsigned int num_candidates) { //TODO: make gridsize dynamic runPrimeCandidateSearch<<< 400 , 48>>>(candidates, result, num_candidates); }
43a2cd067747afa66bdd8c8922a0b0e095e65bb3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Unit tests for functional operators. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/functional.h" #include "cutlass/core_io.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace core { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Conversion template template <typename Element, typename Operator> __global__ void unary_operator(Element *d, Element const *a) { Operator op; *d = op(*a); } /// Conversion template template <typename Element, typename Operator> __global__ void binary_operator(Element *d, Element const *a, Element const *b, int Iterations = 1) { Operator op; Element a_x = *a; Element b_x = *b; CUTLASS_PRAGMA_NO_UNROLL for (int i = 0; i < Iterations; ++i) { b_x = op(a_x, b_x); } *d = b_x; } /// Conversion template template <typename Element, typename Operator> __global__ void trinary_operator( Element *d, Element const *a, Element const *b, Element const *c, int Iterations = 1) { Operator op; Element a_x = a[blockIdx.x]; Element b_x = b[blockIdx.x]; Element c_x = c[blockIdx.x]; CUTLASS_PRAGMA_NO_UNROLL for (int i = 0; i < Iterations; ++i) { c_x = op(a_x, b_x, c_x); } d[blockIdx.x] = c_x; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace core } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// template <int kN> void Functional_plus_f16xN() { using Element = cutlass::Array<cutlass::half_t, kN>; using Operator = cutlass::plus<Element>; using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>; Tensor D({1, kN}); Tensor A({1, kN}); Tensor B({1, kN}); Tensor C({1, kN}); for (int i = 0; i < kN; ++i) { A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5); B.host_data()[i] = cutlass::half_t((i * 4 
+ 8) % 7); D.host_data()[i] = cutlass::half_t(0); } D.sync_device(); A.sync_device(); B.sync_device(); hipLaunchKernelGGL(( test::core::kernel::binary_operator<Element, Operator>), dim3(dim3(1,1)), dim3(dim3(1,1)) , 0, 0, reinterpret_cast<Element *>(D.device_data()), reinterpret_cast<Element const *>(A.device_data()), reinterpret_cast<Element const *>(B.device_data()) ); D.sync_host(); bool some_d_nonzero = false; for (int i = 0; i < kN; ++i) { float a = float(A.host_data()[i]); float b = float(B.host_data()[i]); float d = float(D.host_data()[i]); EXPECT_TRUE(d == (a + b)); if (d != 0) { some_d_nonzero = true; } } EXPECT_TRUE(some_d_nonzero); } TEST(Functional, plus_f16x16) { Functional_plus_f16xN<16>(); } TEST(Functional, plus_f16x17) { Functional_plus_f16xN<17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <int kN> void Functional_minus_f16xN() { using Element = cutlass::Array<cutlass::half_t, kN>; using Operator = cutlass::minus<Element>; using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>; Tensor D({1, kN}); Tensor A({1, kN}); Tensor B({1, kN}); Tensor C({1, kN}); for (int i = 0; i < kN; ++i) { A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5); B.host_data()[i] = cutlass::half_t((i * 4 + 8) % 7); D.host_data()[i] = cutlass::half_t(0); } D.sync_device(); A.sync_device(); B.sync_device(); hipLaunchKernelGGL(( test::core::kernel::binary_operator<Element, Operator>), dim3(dim3(1,1)), dim3(dim3(1,1)) , 0, 0, reinterpret_cast<Element *>(D.device_data()), reinterpret_cast<Element const *>(A.device_data()), reinterpret_cast<Element const *>(B.device_data()) ); D.sync_host(); bool some_d_nonzero = false; for (int i = 0; i < kN; ++i) { float a = float(A.host_data()[i]); float b = float(B.host_data()[i]); float d = float(D.host_data()[i]); EXPECT_TRUE(d == (a - b)); if (d != 0) { some_d_nonzero = true; } } EXPECT_TRUE(some_d_nonzero); } TEST(Functional, minus_f16x16) { 
Functional_minus_f16xN<16>(); } TEST(Functional, minus_f16x17) { Functional_minus_f16xN<17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <int kN> void Functional_multiplies_f16xN() { using Element = cutlass::Array<cutlass::half_t, kN>; using Operator = cutlass::multiplies<Element>; using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>; Tensor D({1, kN}); Tensor A({1, kN}); Tensor B({1, kN}); Tensor C({1, kN}); for (int i = 0; i < kN; ++i) { A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5); B.host_data()[i] = cutlass::half_t((i * 4 + 8) % 7); D.host_data()[i] = cutlass::half_t(0); } D.sync_device(); A.sync_device(); B.sync_device(); hipLaunchKernelGGL(( test::core::kernel::binary_operator<Element, Operator>), dim3(dim3(1,1)), dim3(dim3(1,1)) , 0, 0, reinterpret_cast<Element *>(D.device_data()), reinterpret_cast<Element const *>(A.device_data()), reinterpret_cast<Element const *>(B.device_data()) ); D.sync_host(); bool some_d_nonzero = false; for (int i = 0; i < kN; ++i) { float a = float(A.host_data()[i]); float b = float(B.host_data()[i]); float d = float(D.host_data()[i]); EXPECT_TRUE(d == (a * b)); if (d != 0) { some_d_nonzero = true; } } EXPECT_TRUE(some_d_nonzero); } TEST(Functional, multiplies_f16x16) { Functional_multiplies_f16xN<16>(); } TEST(Functional, multiplies_f16x17) { Functional_multiplies_f16xN<17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <int kN> void Functional_divides_f16xN() { using Element = cutlass::Array<cutlass::half_t, kN>; using Operator = cutlass::divides<Element>; using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>; Tensor D({1, kN}); Tensor A({1, kN}); Tensor B({1, kN}); Tensor C({1, kN}); for (int i = 0; i < kN; ++i) { A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5); B.host_data()[i] = cutlass::half_t((i * 4 + 8) % 7); D.host_data()[i] = 
cutlass::half_t(0); } D.sync_device(); A.sync_device(); B.sync_device(); hipLaunchKernelGGL(( test::core::kernel::binary_operator<Element, Operator>), dim3(dim3(1,1)), dim3(dim3(1,1)) , 0, 0, reinterpret_cast<Element *>(D.device_data()), reinterpret_cast<Element const *>(A.device_data()), reinterpret_cast<Element const *>(B.device_data()) ); D.sync_host(); bool some_d_nonzero = false; for (int i = 0; i < kN; ++i) { float a = float(A.host_data()[i]); float b = float(B.host_data()[i]); float d = float(D.host_data()[i]); float expected = a / b; float const kThreshold = 0.0005f; if (std::isnan(expected)) { EXPECT_TRUE(std::isnan(d)); } else if (std::isinf(expected)) { EXPECT_TRUE(std::isinf(d)); } else { EXPECT_TRUE(std::abs(d - expected) < kThreshold) << "Got: " << d << " = " << a << " / " << b << ", expected: " << (a / b); } if (d != 0) { some_d_nonzero = true; } } EXPECT_TRUE(some_d_nonzero); } TEST(Functional, divides_f16x16) { Functional_divides_f16xN<16>(); } TEST(Functional, divides_f16x17) { Functional_divides_f16xN<17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, int kN> void Functional_multiply_add_TxN() { using Element = cutlass::Array<T, kN>; using Operator = cutlass::multiply_add<Element>; using Tensor = cutlass::HostTensor<T, cutlass::layout::RowMajor>; Tensor D({1, kN}); Tensor A({1, kN}); Tensor B({1, kN}); Tensor C({1, kN}); for (int i = 0; i < kN; ++i) { A.host_data()[i] = T((i * 2 + 1) % 5); B.host_data()[i] = T((i * 4 + 8) % 7); C.host_data()[i] = T((i * 3 + 11) % 11); D.host_data()[i] = T(0); } D.sync_device(); A.sync_device(); B.sync_device(); C.sync_device(); hipLaunchKernelGGL(( test::core::kernel::trinary_operator<Element, Operator>), dim3(dim3(1,1)), dim3(dim3(1,1)) , 0, 0, reinterpret_cast<Element *>(D.device_data()), reinterpret_cast<Element const *>(A.device_data()), reinterpret_cast<Element const *>(B.device_data()), reinterpret_cast<Element const 
*>(C.device_data()) ); D.sync_host(); bool some_d_nonzero = false; for (int i = 0; i < kN; ++i) { float a = float(A.host_data()[i]); float b = float(B.host_data()[i]); float c = float(C.host_data()[i]); float d = float(D.host_data()[i]); EXPECT_TRUE(d == (a * b + c)); if (d != 0) { some_d_nonzero = true; } } EXPECT_TRUE(some_d_nonzero); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Functional, multiply_add_f16x16) { Functional_multiply_add_TxN<cutlass::half_t, 16>(); } TEST(Functional, multiply_add_f16x17) { Functional_multiply_add_TxN<cutlass::half_t, 17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Functional, multiply_add_bf16x16) { Functional_multiply_add_TxN<cutlass::bfloat16_t, 16>(); } TEST(Functional, multiply_add_bf16x17) { Functional_multiply_add_TxN<cutlass::bfloat16_t, 17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> cutlass::Quaternion<T> random_quaternion(int range) { return cutlass::Quaternion<T>{ T((rand() % range * 2) - range), T((rand() % range * 2) - range), T((rand() % range * 2) - range), T((rand() % range * 2) - range) }; } template <typename T> void Functional_multiply_add_QuaternionT() { using Element = cutlass::Quaternion<T>; using Operator = cutlass::multiply_add<Element, Element, Element>; using HostTensor = cutlass::HostTensor<Element, cutlass::layout::RowMajor>; int const kM = 128; int const kRange = 8; HostTensor A({kM, 1}); HostTensor B({kM, 1}); HostTensor C({kM, 1}); HostTensor D({kM, 1}); srand(2021); for (int m = 0; m < kM; ++m) { A.at({m, 0}) = random_quaternion<T>(kRange); B.at({m, 0}) = random_quaternion<T>(kRange); C.at({m, 0}) = random_quaternion<T>(kRange); } A.sync_device(); B.sync_device(); C.sync_device(); D.sync_device(); hipLaunchKernelGGL(( test::core::kernel::trinary_operator<Element, Operator>), dim3(dim3(kM,1)), 
dim3(dim3(1,1)) , 0, 0, D.device_data(), A.device_data(), B.device_data(), C.device_data() ); D.sync_host(); for (int m = 0; m < kM; ++m) { Element a = A.at({m, 0}); Element b = B.at({m, 0}); Element c = C.at({m, 0}); Element got = D.at({m, 0}); Element expected = a * b + c; EXPECT_TRUE(got == expected); } } TEST(Functional, multiply_add_quaternion_f32) { Functional_multiply_add_QuaternionT<float>(); } /////////////////////////////////////////////////////////////////////////////////////////////////
43a2cd067747afa66bdd8c8922a0b0e095e65bb3.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Unit tests for functional operators. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/functional.h" #include "cutlass/core_io.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace core { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Conversion template template <typename Element, typename Operator> __global__ void unary_operator(Element *d, Element const *a) { Operator op; *d = op(*a); } /// Conversion template template <typename Element, typename Operator> __global__ void binary_operator(Element *d, Element const *a, Element const *b, int Iterations = 1) { Operator op; Element a_x = *a; Element b_x = *b; CUTLASS_PRAGMA_NO_UNROLL for (int i = 0; i < Iterations; ++i) { b_x = op(a_x, b_x); } *d = b_x; } /// Conversion template template <typename Element, typename Operator> __global__ void trinary_operator( Element *d, Element const *a, Element const *b, Element const *c, int Iterations = 1) { Operator op; Element a_x = a[blockIdx.x]; Element b_x = b[blockIdx.x]; Element c_x = c[blockIdx.x]; CUTLASS_PRAGMA_NO_UNROLL for (int i = 0; i < Iterations; ++i) { c_x = op(a_x, b_x, c_x); } d[blockIdx.x] = c_x; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace core } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// template <int kN> void Functional_plus_f16xN() { using Element = cutlass::Array<cutlass::half_t, kN>; using Operator = cutlass::plus<Element>; using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>; Tensor D({1, kN}); Tensor A({1, kN}); Tensor B({1, kN}); Tensor C({1, kN}); for (int i = 0; i < kN; ++i) { A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5); B.host_data()[i] = cutlass::half_t((i * 4 
+ 8) % 7); D.host_data()[i] = cutlass::half_t(0); } D.sync_device(); A.sync_device(); B.sync_device(); test::core::kernel::binary_operator<Element, Operator><<< dim3(1,1), dim3(1,1) >>>( reinterpret_cast<Element *>(D.device_data()), reinterpret_cast<Element const *>(A.device_data()), reinterpret_cast<Element const *>(B.device_data()) ); D.sync_host(); bool some_d_nonzero = false; for (int i = 0; i < kN; ++i) { float a = float(A.host_data()[i]); float b = float(B.host_data()[i]); float d = float(D.host_data()[i]); EXPECT_TRUE(d == (a + b)); if (d != 0) { some_d_nonzero = true; } } EXPECT_TRUE(some_d_nonzero); } TEST(Functional, plus_f16x16) { Functional_plus_f16xN<16>(); } TEST(Functional, plus_f16x17) { Functional_plus_f16xN<17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <int kN> void Functional_minus_f16xN() { using Element = cutlass::Array<cutlass::half_t, kN>; using Operator = cutlass::minus<Element>; using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>; Tensor D({1, kN}); Tensor A({1, kN}); Tensor B({1, kN}); Tensor C({1, kN}); for (int i = 0; i < kN; ++i) { A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5); B.host_data()[i] = cutlass::half_t((i * 4 + 8) % 7); D.host_data()[i] = cutlass::half_t(0); } D.sync_device(); A.sync_device(); B.sync_device(); test::core::kernel::binary_operator<Element, Operator><<< dim3(1,1), dim3(1,1) >>>( reinterpret_cast<Element *>(D.device_data()), reinterpret_cast<Element const *>(A.device_data()), reinterpret_cast<Element const *>(B.device_data()) ); D.sync_host(); bool some_d_nonzero = false; for (int i = 0; i < kN; ++i) { float a = float(A.host_data()[i]); float b = float(B.host_data()[i]); float d = float(D.host_data()[i]); EXPECT_TRUE(d == (a - b)); if (d != 0) { some_d_nonzero = true; } } EXPECT_TRUE(some_d_nonzero); } TEST(Functional, minus_f16x16) { Functional_minus_f16xN<16>(); } TEST(Functional, minus_f16x17) { 
Functional_minus_f16xN<17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <int kN> void Functional_multiplies_f16xN() { using Element = cutlass::Array<cutlass::half_t, kN>; using Operator = cutlass::multiplies<Element>; using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>; Tensor D({1, kN}); Tensor A({1, kN}); Tensor B({1, kN}); Tensor C({1, kN}); for (int i = 0; i < kN; ++i) { A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5); B.host_data()[i] = cutlass::half_t((i * 4 + 8) % 7); D.host_data()[i] = cutlass::half_t(0); } D.sync_device(); A.sync_device(); B.sync_device(); test::core::kernel::binary_operator<Element, Operator><<< dim3(1,1), dim3(1,1) >>>( reinterpret_cast<Element *>(D.device_data()), reinterpret_cast<Element const *>(A.device_data()), reinterpret_cast<Element const *>(B.device_data()) ); D.sync_host(); bool some_d_nonzero = false; for (int i = 0; i < kN; ++i) { float a = float(A.host_data()[i]); float b = float(B.host_data()[i]); float d = float(D.host_data()[i]); EXPECT_TRUE(d == (a * b)); if (d != 0) { some_d_nonzero = true; } } EXPECT_TRUE(some_d_nonzero); } TEST(Functional, multiplies_f16x16) { Functional_multiplies_f16xN<16>(); } TEST(Functional, multiplies_f16x17) { Functional_multiplies_f16xN<17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <int kN> void Functional_divides_f16xN() { using Element = cutlass::Array<cutlass::half_t, kN>; using Operator = cutlass::divides<Element>; using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>; Tensor D({1, kN}); Tensor A({1, kN}); Tensor B({1, kN}); Tensor C({1, kN}); for (int i = 0; i < kN; ++i) { A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5); B.host_data()[i] = cutlass::half_t((i * 4 + 8) % 7); D.host_data()[i] = cutlass::half_t(0); } D.sync_device(); A.sync_device(); B.sync_device(); 
test::core::kernel::binary_operator<Element, Operator><<< dim3(1,1), dim3(1,1) >>>( reinterpret_cast<Element *>(D.device_data()), reinterpret_cast<Element const *>(A.device_data()), reinterpret_cast<Element const *>(B.device_data()) ); D.sync_host(); bool some_d_nonzero = false; for (int i = 0; i < kN; ++i) { float a = float(A.host_data()[i]); float b = float(B.host_data()[i]); float d = float(D.host_data()[i]); float expected = a / b; float const kThreshold = 0.0005f; if (std::isnan(expected)) { EXPECT_TRUE(std::isnan(d)); } else if (std::isinf(expected)) { EXPECT_TRUE(std::isinf(d)); } else { EXPECT_TRUE(std::abs(d - expected) < kThreshold) << "Got: " << d << " = " << a << " / " << b << ", expected: " << (a / b); } if (d != 0) { some_d_nonzero = true; } } EXPECT_TRUE(some_d_nonzero); } TEST(Functional, divides_f16x16) { Functional_divides_f16xN<16>(); } TEST(Functional, divides_f16x17) { Functional_divides_f16xN<17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, int kN> void Functional_multiply_add_TxN() { using Element = cutlass::Array<T, kN>; using Operator = cutlass::multiply_add<Element>; using Tensor = cutlass::HostTensor<T, cutlass::layout::RowMajor>; Tensor D({1, kN}); Tensor A({1, kN}); Tensor B({1, kN}); Tensor C({1, kN}); for (int i = 0; i < kN; ++i) { A.host_data()[i] = T((i * 2 + 1) % 5); B.host_data()[i] = T((i * 4 + 8) % 7); C.host_data()[i] = T((i * 3 + 11) % 11); D.host_data()[i] = T(0); } D.sync_device(); A.sync_device(); B.sync_device(); C.sync_device(); test::core::kernel::trinary_operator<Element, Operator><<< dim3(1,1), dim3(1,1) >>>( reinterpret_cast<Element *>(D.device_data()), reinterpret_cast<Element const *>(A.device_data()), reinterpret_cast<Element const *>(B.device_data()), reinterpret_cast<Element const *>(C.device_data()) ); D.sync_host(); bool some_d_nonzero = false; for (int i = 0; i < kN; ++i) { float a = float(A.host_data()[i]); float b = 
float(B.host_data()[i]); float c = float(C.host_data()[i]); float d = float(D.host_data()[i]); EXPECT_TRUE(d == (a * b + c)); if (d != 0) { some_d_nonzero = true; } } EXPECT_TRUE(some_d_nonzero); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Functional, multiply_add_f16x16) { Functional_multiply_add_TxN<cutlass::half_t, 16>(); } TEST(Functional, multiply_add_f16x17) { Functional_multiply_add_TxN<cutlass::half_t, 17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Functional, multiply_add_bf16x16) { Functional_multiply_add_TxN<cutlass::bfloat16_t, 16>(); } TEST(Functional, multiply_add_bf16x17) { Functional_multiply_add_TxN<cutlass::bfloat16_t, 17>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> cutlass::Quaternion<T> random_quaternion(int range) { return cutlass::Quaternion<T>{ T((rand() % range * 2) - range), T((rand() % range * 2) - range), T((rand() % range * 2) - range), T((rand() % range * 2) - range) }; } template <typename T> void Functional_multiply_add_QuaternionT() { using Element = cutlass::Quaternion<T>; using Operator = cutlass::multiply_add<Element, Element, Element>; using HostTensor = cutlass::HostTensor<Element, cutlass::layout::RowMajor>; int const kM = 128; int const kRange = 8; HostTensor A({kM, 1}); HostTensor B({kM, 1}); HostTensor C({kM, 1}); HostTensor D({kM, 1}); srand(2021); for (int m = 0; m < kM; ++m) { A.at({m, 0}) = random_quaternion<T>(kRange); B.at({m, 0}) = random_quaternion<T>(kRange); C.at({m, 0}) = random_quaternion<T>(kRange); } A.sync_device(); B.sync_device(); C.sync_device(); D.sync_device(); test::core::kernel::trinary_operator<Element, Operator><<< dim3(kM,1), dim3(1,1) >>>( D.device_data(), A.device_data(), B.device_data(), C.device_data() ); D.sync_host(); for (int m = 0; m < kM; ++m) { Element a = A.at({m, 0}); Element b = 
B.at({m, 0}); Element c = C.at({m, 0}); Element got = D.at({m, 0}); Element expected = a * b + c; EXPECT_TRUE(got == expected); } } TEST(Functional, multiply_add_quaternion_f32) { Functional_multiply_add_QuaternionT<float>(); } /////////////////////////////////////////////////////////////////////////////////////////////////
3e512bedc761af238b0f1bc6adee26dcbf062103.hip
// !!! This is a file automatically generated by hipify!!! // This program performs general matrix multiplication on row-major layout // using tf::cublasFlowCapturer::c_gemm. #include <taskflow/taskflow.hpp> #include <taskflow/cudaflow.hpp> #include <taskflow/cublasflow.hpp> int main() { const int M = 2, N = 4, K = 3; const std::vector<float> hA = { 11, 12, 13, 14, 15, 16 }; // M x K const std::vector<float> hB = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22 }; // K x N const std::vector<float> golden = { 548, 584, 620, 656, 683, 728, 773, 818 }; // M x N std::vector<float> hC(M*N); //auto dA = tf::cuda_malloc_device<float>(hA.size()); //auto dB = tf::cuda_malloc_device<float>(hB.size()); //auto dC = tf::cuda_malloc_device<float>(hC.size()); //auto dAlpha = tf::cuda_malloc_device<float>(1); //auto dBeta = tf::cuda_malloc_device<float>(1); float *dA, *dB, *dC, *dAlpha, *dBeta; tf::Taskflow taskflow("Matrix Multiplication"); tf::Executor executor; auto malloc_dA = taskflow.emplace( [&](){ dA = tf::cuda_malloc_device<float>(hA.size()); } ).name("malloc_dA"); auto malloc_dB = taskflow.emplace( [&](){ dB = tf::cuda_malloc_device<float>(hB.size()); } ).name("malloc_dB"); auto malloc_dC = taskflow.emplace( [&](){ dC = tf::cuda_malloc_device<float>(hC.size()); } ).name("malloc_dC"); auto malloc_dAlpha = taskflow.emplace( [&](){ dAlpha = tf::cuda_malloc_device<float>(1); } ).name("malloc_dAlpha"); auto malloc_dBeta = taskflow.emplace( [&](){ dBeta = tf::cuda_malloc_device<float>(1); } ).name("malloc_dBeta"); auto cublasFlow = taskflow.emplace([&](tf::cudaFlowCapturer& capturer) { auto blas = capturer.make_capturer<tf::cublasFlowCapturer>(); auto alpha = capturer.single_task([=] __device__ () { *dAlpha = 1; }) .name("alpha=1"); auto beta = capturer.single_task([=] __device__ () { *dBeta = 0; }) .name("beta=0"); auto copyA = capturer.copy(dA, hA.data(), hA.size()).name("copyA"); auto copyB = capturer.copy(dB, hB.data(), hB.size()).name("copyB"); auto gemm = 
blas->c_gemm(HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, dAlpha, dA, K, dB, N, dBeta, dC, N ).name("C = alpha * A * B + beta * C"); auto copyC = capturer.copy(hC.data(), dC, hC.size()).name("copyC"); gemm.succeed(alpha, beta, copyA, copyB) .precede(copyC); capturer.dump(std::cout); // dump the graph constructed so far. }).name("cublasFlow"); cublasFlow.succeed( malloc_dA, malloc_dB, malloc_dC, malloc_dAlpha, malloc_dBeta ); executor.run(taskflow).wait(); taskflow.dump(std::cout); std::cout << "Matrix C:\n"; for(int m=0; m<M; m++) { for(int n=0; n<N; n++) { std::cout << hC[m*N+n] << ' '; } std::cout << '\n'; } return 0; }
3e512bedc761af238b0f1bc6adee26dcbf062103.cu
// This program performs general matrix multiplication on row-major layout // using tf::cublasFlowCapturer::c_gemm. #include <taskflow/taskflow.hpp> #include <taskflow/cudaflow.hpp> #include <taskflow/cublasflow.hpp> int main() { const int M = 2, N = 4, K = 3; const std::vector<float> hA = { 11, 12, 13, 14, 15, 16 }; // M x K const std::vector<float> hB = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22 }; // K x N const std::vector<float> golden = { 548, 584, 620, 656, 683, 728, 773, 818 }; // M x N std::vector<float> hC(M*N); //auto dA = tf::cuda_malloc_device<float>(hA.size()); //auto dB = tf::cuda_malloc_device<float>(hB.size()); //auto dC = tf::cuda_malloc_device<float>(hC.size()); //auto dAlpha = tf::cuda_malloc_device<float>(1); //auto dBeta = tf::cuda_malloc_device<float>(1); float *dA, *dB, *dC, *dAlpha, *dBeta; tf::Taskflow taskflow("Matrix Multiplication"); tf::Executor executor; auto malloc_dA = taskflow.emplace( [&](){ dA = tf::cuda_malloc_device<float>(hA.size()); } ).name("malloc_dA"); auto malloc_dB = taskflow.emplace( [&](){ dB = tf::cuda_malloc_device<float>(hB.size()); } ).name("malloc_dB"); auto malloc_dC = taskflow.emplace( [&](){ dC = tf::cuda_malloc_device<float>(hC.size()); } ).name("malloc_dC"); auto malloc_dAlpha = taskflow.emplace( [&](){ dAlpha = tf::cuda_malloc_device<float>(1); } ).name("malloc_dAlpha"); auto malloc_dBeta = taskflow.emplace( [&](){ dBeta = tf::cuda_malloc_device<float>(1); } ).name("malloc_dBeta"); auto cublasFlow = taskflow.emplace([&](tf::cudaFlowCapturer& capturer) { auto blas = capturer.make_capturer<tf::cublasFlowCapturer>(); auto alpha = capturer.single_task([=] __device__ () { *dAlpha = 1; }) .name("alpha=1"); auto beta = capturer.single_task([=] __device__ () { *dBeta = 0; }) .name("beta=0"); auto copyA = capturer.copy(dA, hA.data(), hA.size()).name("copyA"); auto copyB = capturer.copy(dB, hB.data(), hB.size()).name("copyB"); auto gemm = blas->c_gemm(CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, dAlpha, dA, K, dB, N, 
dBeta, dC, N ).name("C = alpha * A * B + beta * C"); auto copyC = capturer.copy(hC.data(), dC, hC.size()).name("copyC"); gemm.succeed(alpha, beta, copyA, copyB) .precede(copyC); capturer.dump(std::cout); // dump the graph constructed so far. }).name("cublasFlow"); cublasFlow.succeed( malloc_dA, malloc_dB, malloc_dC, malloc_dAlpha, malloc_dBeta ); executor.run(taskflow).wait(); taskflow.dump(std::cout); std::cout << "Matrix C:\n"; for(int m=0; m<M; m++) { for(int n=0; n<N; n++) { std::cout << hC[m*N+n] << ' '; } std::cout << '\n'; } return 0; }
17b00ef5c3b321ab0f6f6de662ceffaf4940197c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define N 16 #include<stdio.h> #include<stdlib.h> __global__ void add(int *a, int *b, int *c){ c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; printf("%d blockIdx=%d\n", c[blockIdx.x], blockIdx.x); } void random_ints(int *array, int size){ int i; for(i = 0; i < size; i++) array[i] = rand() % 10; } int main() { int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof(int); int i; a = (int *) malloc(size); b = (int *) malloc(size); c = (int *) malloc(size); random_ints(a, N); random_ints(b, N); random_ints(c, N); hipMalloc((void **) &d_a, size); hipMalloc((void **) &d_b, size); hipMalloc((void **) &d_c, size); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, d_a, d_b, d_c); hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost); printf("\n\n\n"); for(i = 0; i < N; i++) printf("%d index=%d\n", c[i], i); free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
17b00ef5c3b321ab0f6f6de662ceffaf4940197c.cu
#define N 16 #include<stdio.h> #include<stdlib.h> __global__ void add(int *a, int *b, int *c){ c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; printf("%d blockIdx=%d\n", c[blockIdx.x], blockIdx.x); } void random_ints(int *array, int size){ int i; for(i = 0; i < size; i++) array[i] = rand() % 10; } int main() { int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof(int); int i; a = (int *) malloc(size); b = (int *) malloc(size); c = (int *) malloc(size); random_ints(a, N); random_ints(b, N); random_ints(c, N); cudaMalloc((void **) &d_a, size); cudaMalloc((void **) &d_b, size); cudaMalloc((void **) &d_c, size); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); add<<<N, 1>>>(d_a, d_b, d_c); cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost); printf("\n\n\n"); for(i = 0; i < N; i++) printf("%d index=%d\n", c[i], i); free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
19d8acb83811b465bd6e848b99ad0d5dac6f51e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void kernCalcCornerBlockHist( unsigned char * src, int rows, int cols, int beginX, int beginY, unsigned int * outHist ) { int tid = cols * (threadIdx.y + beginY) + (threadIdx.x + beginX); atomicAdd(&(outHist[src[tid]]), 1); }
19d8acb83811b465bd6e848b99ad0d5dac6f51e7.cu
__global__ void kernCalcCornerBlockHist( unsigned char * src, int rows, int cols, int beginX, int beginY, unsigned int * outHist ) { int tid = cols * (threadIdx.y + beginY) + (threadIdx.x + beginX); atomicAdd(&(outHist[src[tid]]), 1); }
5df3ea216f787ef5e3b70e812d55829efae2eb9a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/sequence_scale.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" namespace paddle { namespace operators { namespace math { using platform::PADDLE_CUDA_NUM_THREADS; template <typename T, int BlockSize> __global__ void SequenceScaleKernel(T* seq, size_t* lod, const T* scales, const size_t seq_width) { for (int i = threadIdx.x; i < (lod[blockIdx.x + 1] - lod[blockIdx.x]) * seq_width; i += BlockSize) { int idx = lod[blockIdx.x] * seq_width + i; seq[idx] *= scales[blockIdx.x]; } } template <typename T> class ScaleLoDTensorFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const T* scales, framework::LoDTensor* seq) { const size_t level = 0; auto lod = seq->lod(); const size_t num_seq = lod[level].size() - 1; const size_t seq_width = seq->numel() / seq->dims()[0]; auto abs_offset_lod = framework::ToAbsOffset(lod); T* seq_data = seq->mutable_data<T>(context.GetPlace()); paddle::framework::MixVector<size_t> mix_vector(&(abs_offset_lod[level])); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL( HIP_KERNEL_NAME(SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>), dim3(num_seq), dim3(PADDLE_CUDA_NUM_THREADS), 0, context.stream(), seq_data, 
mix_vector.CUDAMutableData(context.GetPlace()), scales, seq_width); #else hipLaunchKernelGGL(( SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>), dim3(num_seq), dim3(PADDLE_CUDA_NUM_THREADS), 0, context.stream(), seq_data, mix_vector.CUDAMutableData(context.GetPlace()), scales, seq_width); #endif mix_vector.CopyToCPU(); } }; template <typename T> class ScaleLoDTensorFunctor<phi::GPUContext, T> { public: void operator()(const phi::GPUContext& context, const T* scales, framework::LoDTensor* seq) { const size_t level = 0; auto lod = seq->lod(); const size_t num_seq = lod[level].size() - 1; const size_t seq_width = seq->numel() / seq->dims()[0]; auto abs_offset_lod = framework::ToAbsOffset(lod); T* seq_data = seq->mutable_data<T>(context.GetPlace()); paddle::framework::MixVector<size_t> mix_vector(&(abs_offset_lod[level])); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL( HIP_KERNEL_NAME(SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>), dim3(num_seq), dim3(PADDLE_CUDA_NUM_THREADS), 0, context.stream(), seq_data, mix_vector.CUDAMutableData(context.GetPlace()), scales, seq_width); #else hipLaunchKernelGGL(( SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>), dim3(num_seq), dim3(PADDLE_CUDA_NUM_THREADS), 0, context.stream(), seq_data, mix_vector.CUDAMutableData(context.GetPlace()), scales, seq_width); #endif mix_vector.CopyToCPU(); } }; template class ScaleLoDTensorFunctor<platform::CUDADeviceContext, float>; template class ScaleLoDTensorFunctor<platform::CUDADeviceContext, double>; template class ScaleLoDTensorFunctor<phi::GPUContext, float>; template class ScaleLoDTensorFunctor<phi::GPUContext, double>; } // namespace math } // namespace operators } // namespace paddle
5df3ea216f787ef5e3b70e812d55829efae2eb9a.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/sequence_scale.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" namespace paddle { namespace operators { namespace math { using platform::PADDLE_CUDA_NUM_THREADS; template <typename T, int BlockSize> __global__ void SequenceScaleKernel(T* seq, size_t* lod, const T* scales, const size_t seq_width) { for (int i = threadIdx.x; i < (lod[blockIdx.x + 1] - lod[blockIdx.x]) * seq_width; i += BlockSize) { int idx = lod[blockIdx.x] * seq_width + i; seq[idx] *= scales[blockIdx.x]; } } template <typename T> class ScaleLoDTensorFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const T* scales, framework::LoDTensor* seq) { const size_t level = 0; auto lod = seq->lod(); const size_t num_seq = lod[level].size() - 1; const size_t seq_width = seq->numel() / seq->dims()[0]; auto abs_offset_lod = framework::ToAbsOffset(lod); T* seq_data = seq->mutable_data<T>(context.GetPlace()); paddle::framework::MixVector<size_t> mix_vector(&(abs_offset_lod[level])); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL( HIP_KERNEL_NAME(SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>), dim3(num_seq), dim3(PADDLE_CUDA_NUM_THREADS), 0, context.stream(), seq_data, mix_vector.CUDAMutableData(context.GetPlace()), scales, seq_width); #else SequenceScaleKernel<T, 
PADDLE_CUDA_NUM_THREADS><<< num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>( seq_data, mix_vector.CUDAMutableData(context.GetPlace()), scales, seq_width); #endif mix_vector.CopyToCPU(); } }; template <typename T> class ScaleLoDTensorFunctor<phi::GPUContext, T> { public: void operator()(const phi::GPUContext& context, const T* scales, framework::LoDTensor* seq) { const size_t level = 0; auto lod = seq->lod(); const size_t num_seq = lod[level].size() - 1; const size_t seq_width = seq->numel() / seq->dims()[0]; auto abs_offset_lod = framework::ToAbsOffset(lod); T* seq_data = seq->mutable_data<T>(context.GetPlace()); paddle::framework::MixVector<size_t> mix_vector(&(abs_offset_lod[level])); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL( HIP_KERNEL_NAME(SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>), dim3(num_seq), dim3(PADDLE_CUDA_NUM_THREADS), 0, context.stream(), seq_data, mix_vector.CUDAMutableData(context.GetPlace()), scales, seq_width); #else SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS><<< num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>( seq_data, mix_vector.CUDAMutableData(context.GetPlace()), scales, seq_width); #endif mix_vector.CopyToCPU(); } }; template class ScaleLoDTensorFunctor<platform::CUDADeviceContext, float>; template class ScaleLoDTensorFunctor<platform::CUDADeviceContext, double>; template class ScaleLoDTensorFunctor<phi::GPUContext, float>; template class ScaleLoDTensorFunctor<phi::GPUContext, double>; } // namespace math } // namespace operators } // namespace paddle
2e7d301719e0e0832cea5816569eef05eb896249.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by chnlkw on 11/14/17. // #include "cuda_utils.h" #ifdef USE_ROCM #define REPORT_CUDA_SUCCESS 0 bool cudaEnsureSuccess(hipError_t status, const char *status_context_description, bool die_on_error, const char *filename, unsigned line_number) { if (status_context_description == NULL) status_context_description = ""; if (status == hipSuccess) { #if REPORT_CUDA_SUCCESS std::cerr << "Succeeded: " << status_context_description << std::endl << std::flush; #endif return true; } const char *errorString = hipGetErrorString(status); std::cerr << "CUDA Error: "; if (status_context_description != NULL) { std::cerr << status_context_description << ": "; } if (errorString != NULL) { std::cerr << errorString; } else { std::cerr << "(Unknown CUDA status code " << status << ")"; } if (filename != NULL) { std::cerr << " at " << filename << ":" << line_number; } std::cerr << std::endl << std::flush; if (die_on_error) { abort(); //exit(EXIT_FAILURE); // ... or cerr << "FATAL ERROR" << etc. etc. 
} return false; } template <class T> __global__ void copy_kernel(T* dst, T* src, size_t N) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) // for (int i = threadIdx.x; i <cnt; i += blockDim.x) dst[i] = src[i]; // memcpy(dst, src, bytes); __syncthreads(); } __global__ void free_kernel(void* src) { if (blockIdx.x == 0 && threadIdx.x == 0) free(src); } void run_copy_kernel(void* dst, void* src, size_t bytes, hipStream_t stream) { if (bytes % sizeof(int) == 0) { size_t N = bytes / sizeof(int); copy_kernel<int> << < (N + 1023) / 1024, 1024, 0, stream >> > ((int *) dst, (int *) src, bytes / sizeof(int)); } else { copy_kernel<char> << < (bytes + 1023) / 1024, 1024, 0, stream >> > ((char *) dst, (char *) src, bytes); } } void run_copy_free_kernel(void* dst, void* src, size_t bytes, hipStream_t stream) { run_copy_kernel(dst, src, bytes, stream); hipLaunchKernelGGL(( free_kernel), dim3(1), dim3(1), 0, stream, src); } #endif
2e7d301719e0e0832cea5816569eef05eb896249.cu
// // Created by chnlkw on 11/14/17. // #include "cuda_utils.h" #ifdef USE_CUDA #define REPORT_CUDA_SUCCESS 0 bool cudaEnsureSuccess(cudaError_t status, const char *status_context_description, bool die_on_error, const char *filename, unsigned line_number) { if (status_context_description == NULL) status_context_description = ""; if (status == cudaSuccess) { #if REPORT_CUDA_SUCCESS std::cerr << "Succeeded: " << status_context_description << std::endl << std::flush; #endif return true; } const char *errorString = cudaGetErrorString(status); std::cerr << "CUDA Error: "; if (status_context_description != NULL) { std::cerr << status_context_description << ": "; } if (errorString != NULL) { std::cerr << errorString; } else { std::cerr << "(Unknown CUDA status code " << status << ")"; } if (filename != NULL) { std::cerr << " at " << filename << ":" << line_number; } std::cerr << std::endl << std::flush; if (die_on_error) { abort(); //exit(EXIT_FAILURE); // ... or cerr << "FATAL ERROR" << etc. etc. } return false; } template <class T> __global__ void copy_kernel(T* dst, T* src, size_t N) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) // for (int i = threadIdx.x; i <cnt; i += blockDim.x) dst[i] = src[i]; // memcpy(dst, src, bytes); __syncthreads(); } __global__ void free_kernel(void* src) { if (blockIdx.x == 0 && threadIdx.x == 0) free(src); } void run_copy_kernel(void* dst, void* src, size_t bytes, cudaStream_t stream) { if (bytes % sizeof(int) == 0) { size_t N = bytes / sizeof(int); copy_kernel<int> << < (N + 1023) / 1024, 1024, 0, stream >> > ((int *) dst, (int *) src, bytes / sizeof(int)); } else { copy_kernel<char> << < (bytes + 1023) / 1024, 1024, 0, stream >> > ((char *) dst, (char *) src, bytes); } } void run_copy_free_kernel(void* dst, void* src, size_t bytes, cudaStream_t stream) { run_copy_kernel(dst, src, bytes, stream); free_kernel<<<1, 1, 0, stream>>>(src); } #endif
38b7acf3b52caa1c5b86a7fd22e0a7f2e32f7fb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2023 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. #include "HarmonicDihedralForceGPU.cuh" #include "hoomd/TextureTools.h" #include <assert.h> #if HOOMD_LONGREAL_SIZE == 32 #define __scalar2int_rn __float2int_rn #else #define __scalar2int_rn __double2int_rn #endif /*! \file HarmonicDihedralForceGPU.cu \brief Defines GPU kernel code for calculating the harmonic dihedral forces. Used by HarmonicDihedralForceComputeGPU. */ namespace hoomd { namespace md { namespace kernel { //! Kernel for calculating harmonic dihedral forces on the GPU /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos particle positions on the device \param d_params Parameters for the angle force \param box Box dimensions for periodic boundary condition handling \param tlist Dihedral data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom */ __global__ void gpu_compute_harmonic_dihedral_forces_kernel(Scalar4* d_force, Scalar* d_virial, const size_t virial_pitch, const unsigned int N, const Scalar4* d_pos, const Scalar4* d_params, BoxDim box, const group_storage<4>* tlist, const unsigned int* dihedral_ABCD, const unsigned int pitch, const unsigned int* n_dihedrals_list) { // start by identifying which particle we are to handle int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_dihedrals = n_dihedrals_list[idx]; // read in the position of our b-particle from the a-b-c-d set. 
(MEM TRANSFER: 16 bytes) Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c-d quartet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 pos_a, pos_b, pos_c, pos_d; // allocate space for the a,b, and c atoms in the a-b-c-d quartet // initialize the force to 0 Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); // initialize the virial to 0 Scalar virial_idx[6]; for (unsigned int i = 0; i < 6; i++) virial_idx[i] = Scalar(0.0); // loop over all dihedrals for (int dihedral_idx = 0; dihedral_idx < n_dihedrals; dihedral_idx++) { group_storage<4> cur_dihedral = tlist[pitch * dihedral_idx + idx]; unsigned int cur_ABCD = dihedral_ABCD[pitch * dihedral_idx + idx]; int cur_dihedral_x_idx = cur_dihedral.idx[0]; int cur_dihedral_y_idx = cur_dihedral.idx[1]; int cur_dihedral_z_idx = cur_dihedral.idx[2]; int cur_dihedral_type = cur_dihedral.idx[3]; int cur_dihedral_abcd = cur_ABCD; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = d_pos[cur_dihedral_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = d_pos[cur_dihedral_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 z_postype = d_pos[cur_dihedral_z_idx]; Scalar3 z_pos = make_scalar3(z_postype.x, z_postype.y, z_postype.z); if (cur_dihedral_abcd == 0) { pos_a = idx_pos; pos_b = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 1) { pos_b = idx_pos; pos_a = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 2) { pos_c = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 3) { pos_d = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_c = z_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = pos_a - pos_b; Scalar3 dcb = pos_c - pos_b; Scalar3 ddc = pos_d - pos_c; 
dab = box.minImage(dab); dcb = box.minImage(dcb); ddc = box.minImage(ddc); Scalar3 dcbm = -dcb; dcbm = box.minImage(dcbm); // get the dihedral parameters (MEM TRANSFER: 12 bytes) Scalar4 params = __ldg(d_params + cur_dihedral_type); Scalar K = params.x; Scalar sign = params.y; Scalar multi = params.z; Scalar phi_0 = params.w; Scalar aax = dab.y * dcbm.z - dab.z * dcbm.y; Scalar aay = dab.z * dcbm.x - dab.x * dcbm.z; Scalar aaz = dab.x * dcbm.y - dab.y * dcbm.x; Scalar bbx = ddc.y * dcbm.z - ddc.z * dcbm.y; Scalar bby = ddc.z * dcbm.x - ddc.x * dcbm.z; Scalar bbz = ddc.x * dcbm.y - ddc.y * dcbm.x; Scalar raasq = aax * aax + aay * aay + aaz * aaz; Scalar rbbsq = bbx * bbx + bby * bby + bbz * bbz; Scalar rgsq = dcbm.x * dcbm.x + dcbm.y * dcbm.y + dcbm.z * dcbm.z; Scalar rg = sqrtf(rgsq); Scalar rginv, raa2inv, rbb2inv; rginv = raa2inv = rbb2inv = Scalar(0.0); if (rg > Scalar(0.0)) rginv = Scalar(1.0) / rg; if (raasq > Scalar(0.0)) raa2inv = Scalar(1.0) / raasq; if (rbbsq > Scalar(0.0)) rbb2inv = Scalar(1.0) / rbbsq; Scalar rabinv = sqrtf(raa2inv * rbb2inv); Scalar c_abcd = (aax * bbx + aay * bby + aaz * bbz) * rabinv; Scalar s_abcd = rg * rabinv * (aax * ddc.x + aay * ddc.y + aaz * ddc.z); if (c_abcd > Scalar(1.0)) c_abcd = Scalar(1.0); if (c_abcd < -Scalar(1.0)) c_abcd = -Scalar(1.0); Scalar p = Scalar(1.0); Scalar ddfab; Scalar dfab = Scalar(0.0); int m = __scalar2int_rn(multi); for (int jj = 0; jj < m; jj++) { ddfab = p * c_abcd - dfab * s_abcd; dfab = p * s_abcd + dfab * c_abcd; p = ddfab; } ///////////////////////// // FROM LAMMPS: sin_shift is always 0... so dropping all sin_shift terms!!!! 
// Adding charmm dihedral functionality, sin_shift not always 0, // cos_shift not always 1 ///////////////////////// Scalar sin_phi_0 = fast::sin(phi_0); Scalar cos_phi_0 = fast::cos(phi_0); p = p * cos_phi_0 + dfab * sin_phi_0; p *= sign; dfab = dfab * cos_phi_0 - ddfab * sin_phi_0; dfab *= sign; dfab *= -multi; p += Scalar(1.0); if (multi < Scalar(1.0)) { p = Scalar(1.0) + sign; dfab = Scalar(0.0); } Scalar fg = dab.x * dcbm.x + dab.y * dcbm.y + dab.z * dcbm.z; Scalar hg = ddc.x * dcbm.x + ddc.y * dcbm.y + ddc.z * dcbm.z; Scalar fga = fg * raa2inv * rginv; Scalar hgb = hg * rbb2inv * rginv; Scalar gaa = -raa2inv * rg; Scalar gbb = rbb2inv * rg; Scalar dtfx = gaa * aax; Scalar dtfy = gaa * aay; Scalar dtfz = gaa * aaz; Scalar dtgx = fga * aax - hgb * bbx; Scalar dtgy = fga * aay - hgb * bby; Scalar dtgz = fga * aaz - hgb * bbz; Scalar dthx = gbb * bbx; Scalar dthy = gbb * bby; Scalar dthz = gbb * bbz; // Scalar df = -K * dfab; Scalar df = -K * dfab * Scalar(0.500); // the 0.5 term is for 1/2K in the forces Scalar sx2 = df * dtgx; Scalar sy2 = df * dtgy; Scalar sz2 = df * dtgz; Scalar ffax = df * dtfx; Scalar ffay = df * dtfy; Scalar ffaz = df * dtfz; Scalar ffbx = sx2 - ffax; Scalar ffby = sy2 - ffay; Scalar ffbz = sz2 - ffaz; Scalar ffdx = df * dthx; Scalar ffdy = df * dthy; Scalar ffdz = df * dthz; Scalar ffcx = -sx2 - ffdx; Scalar ffcy = -sy2 - ffdy; Scalar ffcz = -sz2 - ffdz; // Now, apply the force to each individual atom a,b,c,d // and accumulate the energy/virial // compute 1/4 of the energy, 1/4 for each atom in the dihedral // Scalar dihedral_eng = p*K*Scalar(1.0/4.0); Scalar dihedral_eng = p * K * Scalar(1.0 / 8.0); // the 1/8th term is (1/2)K * 1/4 // compute 1/4 of the virial, 1/4 for each atom in the dihedral // upper triangular version of virial tensor Scalar dihedral_virial[6]; dihedral_virial[0] = Scalar(1. / 4.) * (dab.x * ffax + dcb.x * ffcx + (ddc.x + dcb.x) * ffdx); dihedral_virial[1] = Scalar(1. / 4.) 
* (dab.y * ffax + dcb.y * ffcx + (ddc.y + dcb.y) * ffdx); dihedral_virial[2] = Scalar(1. / 4.) * (dab.z * ffax + dcb.z * ffcx + (ddc.z + dcb.z) * ffdx); dihedral_virial[3] = Scalar(1. / 4.) * (dab.y * ffay + dcb.y * ffcy + (ddc.y + dcb.y) * ffdy); dihedral_virial[4] = Scalar(1. / 4.) * (dab.z * ffay + dcb.z * ffcy + (ddc.z + dcb.z) * ffdy); dihedral_virial[5] = Scalar(1. / 4.) * (dab.z * ffaz + dcb.z * ffcz + (ddc.z + dcb.z) * ffdz); if (cur_dihedral_abcd == 0) { force_idx.x += ffax; force_idx.y += ffay; force_idx.z += ffaz; } if (cur_dihedral_abcd == 1) { force_idx.x += ffbx; force_idx.y += ffby; force_idx.z += ffbz; } if (cur_dihedral_abcd == 2) { force_idx.x += ffcx; force_idx.y += ffcy; force_idx.z += ffcz; } if (cur_dihedral_abcd == 3) { force_idx.x += ffdx; force_idx.y += ffdy; force_idx.z += ffdz; } force_idx.w += dihedral_eng; for (int k = 0; k < 6; k++) virial_idx[k] += dihedral_virial[k]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int k = 0; k < 6; k++) d_virial[k * virial_pitch + idx] = virial_idx[k]; } /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos particle positions on the GPU \param box Box dimensions (in GPU format) to use for periodic boundary conditions \param tlist Dihedral data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom \param d_params K, sign,multiplicity params packed as padded Scalar4 variables \param n_dihedral_types Number of dihedral types in d_params \param block_size Block size to use when performing calculations \param compute_capability Compute capability of the device (200, 300, 350, ...) 
\returns Any error code resulting from the kernel launch \note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize() \a d_params should include one Scalar4 element per dihedral type. The x component contains K the spring constant and the y component contains sign, and the z component the multiplicity. */ hipError_t gpu_compute_harmonic_dihedral_forces(Scalar4* d_force, Scalar* d_virial, const size_t virial_pitch, const unsigned int N, const Scalar4* d_pos, const BoxDim& box, const group_storage<4>* tlist, const unsigned int* dihedral_ABCD, const unsigned int pitch, const unsigned int* n_dihedrals_list, Scalar4* d_params, unsigned int n_dihedral_types, int block_size, int warp_size) { assert(d_params); unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_harmonic_dihedral_forces_kernel); max_block_size = attr.maxThreadsPerBlock; if (max_block_size % warp_size) // handle non-sensical return values from hipFuncGetAttributes max_block_size = (max_block_size / warp_size - 1) * warp_size; unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid(N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // run the kernel hipLaunchKernelGGL((gpu_compute_harmonic_dihedral_forces_kernel), grid, threads, 0, 0, d_force, d_virial, virial_pitch, N, d_pos, d_params, box, tlist, dihedral_ABCD, pitch, n_dihedrals_list); return hipSuccess; } } // end namespace kernel } // end namespace md } // end namespace hoomd
38b7acf3b52caa1c5b86a7fd22e0a7f2e32f7fb9.cu
// Copyright (c) 2009-2023 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. #include "HarmonicDihedralForceGPU.cuh" #include "hoomd/TextureTools.h" #include <assert.h> #if HOOMD_LONGREAL_SIZE == 32 #define __scalar2int_rn __float2int_rn #else #define __scalar2int_rn __double2int_rn #endif /*! \file HarmonicDihedralForceGPU.cu \brief Defines GPU kernel code for calculating the harmonic dihedral forces. Used by HarmonicDihedralForceComputeGPU. */ namespace hoomd { namespace md { namespace kernel { //! Kernel for calculating harmonic dihedral forces on the GPU /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos particle positions on the device \param d_params Parameters for the angle force \param box Box dimensions for periodic boundary condition handling \param tlist Dihedral data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom */ __global__ void gpu_compute_harmonic_dihedral_forces_kernel(Scalar4* d_force, Scalar* d_virial, const size_t virial_pitch, const unsigned int N, const Scalar4* d_pos, const Scalar4* d_params, BoxDim box, const group_storage<4>* tlist, const unsigned int* dihedral_ABCD, const unsigned int pitch, const unsigned int* n_dihedrals_list) { // start by identifying which particle we are to handle int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_dihedrals = n_dihedrals_list[idx]; // read in the position of our b-particle from the a-b-c-d set. 
(MEM TRANSFER: 16 bytes) Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c-d quartet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 pos_a, pos_b, pos_c, pos_d; // allocate space for the a,b, and c atoms in the a-b-c-d quartet // initialize the force to 0 Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); // initialize the virial to 0 Scalar virial_idx[6]; for (unsigned int i = 0; i < 6; i++) virial_idx[i] = Scalar(0.0); // loop over all dihedrals for (int dihedral_idx = 0; dihedral_idx < n_dihedrals; dihedral_idx++) { group_storage<4> cur_dihedral = tlist[pitch * dihedral_idx + idx]; unsigned int cur_ABCD = dihedral_ABCD[pitch * dihedral_idx + idx]; int cur_dihedral_x_idx = cur_dihedral.idx[0]; int cur_dihedral_y_idx = cur_dihedral.idx[1]; int cur_dihedral_z_idx = cur_dihedral.idx[2]; int cur_dihedral_type = cur_dihedral.idx[3]; int cur_dihedral_abcd = cur_ABCD; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = d_pos[cur_dihedral_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = d_pos[cur_dihedral_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 z_postype = d_pos[cur_dihedral_z_idx]; Scalar3 z_pos = make_scalar3(z_postype.x, z_postype.y, z_postype.z); if (cur_dihedral_abcd == 0) { pos_a = idx_pos; pos_b = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 1) { pos_b = idx_pos; pos_a = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 2) { pos_c = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 3) { pos_d = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_c = z_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = pos_a - pos_b; Scalar3 dcb = pos_c - pos_b; Scalar3 ddc = pos_d - pos_c; 
dab = box.minImage(dab); dcb = box.minImage(dcb); ddc = box.minImage(ddc); Scalar3 dcbm = -dcb; dcbm = box.minImage(dcbm); // get the dihedral parameters (MEM TRANSFER: 12 bytes) Scalar4 params = __ldg(d_params + cur_dihedral_type); Scalar K = params.x; Scalar sign = params.y; Scalar multi = params.z; Scalar phi_0 = params.w; Scalar aax = dab.y * dcbm.z - dab.z * dcbm.y; Scalar aay = dab.z * dcbm.x - dab.x * dcbm.z; Scalar aaz = dab.x * dcbm.y - dab.y * dcbm.x; Scalar bbx = ddc.y * dcbm.z - ddc.z * dcbm.y; Scalar bby = ddc.z * dcbm.x - ddc.x * dcbm.z; Scalar bbz = ddc.x * dcbm.y - ddc.y * dcbm.x; Scalar raasq = aax * aax + aay * aay + aaz * aaz; Scalar rbbsq = bbx * bbx + bby * bby + bbz * bbz; Scalar rgsq = dcbm.x * dcbm.x + dcbm.y * dcbm.y + dcbm.z * dcbm.z; Scalar rg = sqrtf(rgsq); Scalar rginv, raa2inv, rbb2inv; rginv = raa2inv = rbb2inv = Scalar(0.0); if (rg > Scalar(0.0)) rginv = Scalar(1.0) / rg; if (raasq > Scalar(0.0)) raa2inv = Scalar(1.0) / raasq; if (rbbsq > Scalar(0.0)) rbb2inv = Scalar(1.0) / rbbsq; Scalar rabinv = sqrtf(raa2inv * rbb2inv); Scalar c_abcd = (aax * bbx + aay * bby + aaz * bbz) * rabinv; Scalar s_abcd = rg * rabinv * (aax * ddc.x + aay * ddc.y + aaz * ddc.z); if (c_abcd > Scalar(1.0)) c_abcd = Scalar(1.0); if (c_abcd < -Scalar(1.0)) c_abcd = -Scalar(1.0); Scalar p = Scalar(1.0); Scalar ddfab; Scalar dfab = Scalar(0.0); int m = __scalar2int_rn(multi); for (int jj = 0; jj < m; jj++) { ddfab = p * c_abcd - dfab * s_abcd; dfab = p * s_abcd + dfab * c_abcd; p = ddfab; } ///////////////////////// // FROM LAMMPS: sin_shift is always 0... so dropping all sin_shift terms!!!! 
// Adding charmm dihedral functionality, sin_shift not always 0, // cos_shift not always 1 ///////////////////////// Scalar sin_phi_0 = fast::sin(phi_0); Scalar cos_phi_0 = fast::cos(phi_0); p = p * cos_phi_0 + dfab * sin_phi_0; p *= sign; dfab = dfab * cos_phi_0 - ddfab * sin_phi_0; dfab *= sign; dfab *= -multi; p += Scalar(1.0); if (multi < Scalar(1.0)) { p = Scalar(1.0) + sign; dfab = Scalar(0.0); } Scalar fg = dab.x * dcbm.x + dab.y * dcbm.y + dab.z * dcbm.z; Scalar hg = ddc.x * dcbm.x + ddc.y * dcbm.y + ddc.z * dcbm.z; Scalar fga = fg * raa2inv * rginv; Scalar hgb = hg * rbb2inv * rginv; Scalar gaa = -raa2inv * rg; Scalar gbb = rbb2inv * rg; Scalar dtfx = gaa * aax; Scalar dtfy = gaa * aay; Scalar dtfz = gaa * aaz; Scalar dtgx = fga * aax - hgb * bbx; Scalar dtgy = fga * aay - hgb * bby; Scalar dtgz = fga * aaz - hgb * bbz; Scalar dthx = gbb * bbx; Scalar dthy = gbb * bby; Scalar dthz = gbb * bbz; // Scalar df = -K * dfab; Scalar df = -K * dfab * Scalar(0.500); // the 0.5 term is for 1/2K in the forces Scalar sx2 = df * dtgx; Scalar sy2 = df * dtgy; Scalar sz2 = df * dtgz; Scalar ffax = df * dtfx; Scalar ffay = df * dtfy; Scalar ffaz = df * dtfz; Scalar ffbx = sx2 - ffax; Scalar ffby = sy2 - ffay; Scalar ffbz = sz2 - ffaz; Scalar ffdx = df * dthx; Scalar ffdy = df * dthy; Scalar ffdz = df * dthz; Scalar ffcx = -sx2 - ffdx; Scalar ffcy = -sy2 - ffdy; Scalar ffcz = -sz2 - ffdz; // Now, apply the force to each individual atom a,b,c,d // and accumulate the energy/virial // compute 1/4 of the energy, 1/4 for each atom in the dihedral // Scalar dihedral_eng = p*K*Scalar(1.0/4.0); Scalar dihedral_eng = p * K * Scalar(1.0 / 8.0); // the 1/8th term is (1/2)K * 1/4 // compute 1/4 of the virial, 1/4 for each atom in the dihedral // upper triangular version of virial tensor Scalar dihedral_virial[6]; dihedral_virial[0] = Scalar(1. / 4.) * (dab.x * ffax + dcb.x * ffcx + (ddc.x + dcb.x) * ffdx); dihedral_virial[1] = Scalar(1. / 4.) 
* (dab.y * ffax + dcb.y * ffcx + (ddc.y + dcb.y) * ffdx); dihedral_virial[2] = Scalar(1. / 4.) * (dab.z * ffax + dcb.z * ffcx + (ddc.z + dcb.z) * ffdx); dihedral_virial[3] = Scalar(1. / 4.) * (dab.y * ffay + dcb.y * ffcy + (ddc.y + dcb.y) * ffdy); dihedral_virial[4] = Scalar(1. / 4.) * (dab.z * ffay + dcb.z * ffcy + (ddc.z + dcb.z) * ffdy); dihedral_virial[5] = Scalar(1. / 4.) * (dab.z * ffaz + dcb.z * ffcz + (ddc.z + dcb.z) * ffdz); if (cur_dihedral_abcd == 0) { force_idx.x += ffax; force_idx.y += ffay; force_idx.z += ffaz; } if (cur_dihedral_abcd == 1) { force_idx.x += ffbx; force_idx.y += ffby; force_idx.z += ffbz; } if (cur_dihedral_abcd == 2) { force_idx.x += ffcx; force_idx.y += ffcy; force_idx.z += ffcz; } if (cur_dihedral_abcd == 3) { force_idx.x += ffdx; force_idx.y += ffdy; force_idx.z += ffdz; } force_idx.w += dihedral_eng; for (int k = 0; k < 6; k++) virial_idx[k] += dihedral_virial[k]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int k = 0; k < 6; k++) d_virial[k * virial_pitch + idx] = virial_idx[k]; } /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos particle positions on the GPU \param box Box dimensions (in GPU format) to use for periodic boundary conditions \param tlist Dihedral data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom \param d_params K, sign,multiplicity params packed as padded Scalar4 variables \param n_dihedral_types Number of dihedral types in d_params \param block_size Block size to use when performing calculations \param compute_capability Compute capability of the device (200, 300, 350, ...) 
\returns Any error code resulting from the kernel launch \note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize() \a d_params should include one Scalar4 element per dihedral type. The x component contains K the spring constant and the y component contains sign, and the z component the multiplicity. */ hipError_t gpu_compute_harmonic_dihedral_forces(Scalar4* d_force, Scalar* d_virial, const size_t virial_pitch, const unsigned int N, const Scalar4* d_pos, const BoxDim& box, const group_storage<4>* tlist, const unsigned int* dihedral_ABCD, const unsigned int pitch, const unsigned int* n_dihedrals_list, Scalar4* d_params, unsigned int n_dihedral_types, int block_size, int warp_size) { assert(d_params); unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_harmonic_dihedral_forces_kernel); max_block_size = attr.maxThreadsPerBlock; if (max_block_size % warp_size) // handle non-sensical return values from hipFuncGetAttributes max_block_size = (max_block_size / warp_size - 1) * warp_size; unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid(N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // run the kernel hipLaunchKernelGGL((gpu_compute_harmonic_dihedral_forces_kernel), grid, threads, 0, 0, d_force, d_virial, virial_pitch, N, d_pos, d_params, box, tlist, dihedral_ABCD, pitch, n_dihedrals_list); return hipSuccess; } } // end namespace kernel } // end namespace md } // end namespace hoomd
8111a147e816d597ec732a35ac6913ce243c3209.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/reduce.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" using namespace cv::gpu; typedef unsigned char uchar; typedef unsigned short ushort; ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing namespace cv { namespace gpu { namespace device { namespace imgproc { __device__ __forceinline__ float norm2(const float& v) { return v*v; } __device__ __forceinline__ float norm2(const float2& v) { return v.x*v.x + v.y*v.y; } __device__ __forceinline__ float norm2(const float3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; } __device__ __forceinline__ float norm2(const float4& v) { return v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w; } template<typename T, typename B> __global__ void nlm_kernel(const PtrStep<T> src, PtrStepSz<T> dst, const B b, int search_radius, int block_radius, float noise_mult) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type; const int i = blockDim.y * blockIdx.y + threadIdx.y; const int j = blockDim.x * blockIdx.x + threadIdx.x; if (j >= dst.cols || i >= dst.rows) return; int bsize = search_radius + block_radius; int search_window = 2 * search_radius + 1; float minus_search_window2_inv = -1.f/(search_window * 
search_window); value_type sum1 = VecTraits<value_type>::all(0); float sum2 = 0.f; if (j - bsize >= 0 && j + bsize < dst.cols && i - bsize >= 0 && i + bsize < dst.rows) { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(src(i + y + ty, j + x + tx)); value_type av = saturate_cast<value_type>(src(i + ty, j + tx)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); /*if (i == 255 && j == 255) printf("%f %f\n", w, dist2 * minus_h2_inv + (x * x + y * y) * minus_search_window2_inv);*/ sum1 = sum1 + w * saturate_cast<value_type>(src(i + y, j + x)); sum2 += w; } } else { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(b.at(i + y + ty, j + x + tx, src)); value_type av = saturate_cast<value_type>(b.at(i + ty, j + tx, src)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); sum1 = sum1 + w * saturate_cast<value_type>(b.at(i + y, j + x, src)); sum2 += w; } } dst(i, j) = saturate_cast<T>(sum1 / sum2); } template<typename T, template <typename> class B> void nlm_caller(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, hipStream_t stream) { dim3 block (32, 8); dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y)); B<T> b(src.rows, src.cols); int block_window = 2 * block_radius + 1; float minus_h2_inv = -1.f/(h * h * VecTraits<T>::cn); float noise_mult = minus_h2_inv/(block_window * block_window); cudaSafeCall( hipFuncSetCacheConfig (nlm_kernel<T, B<T> >, 
hipFuncCachePreferL1) ); hipLaunchKernelGGL(( nlm_kernel), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, search_radius, block_radius, noise_mult); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template<typename T> void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, hipStream_t stream) { typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, hipStream_t stream); static func_t funcs[] = { nlm_caller<T, BrdReflect101>, nlm_caller<T, BrdReplicate>, nlm_caller<T, BrdConstant>, nlm_caller<T, BrdReflect>, nlm_caller<T, BrdWrap>, }; funcs[borderMode](src, dst, search_radius, block_radius, h, stream); } template void nlm_bruteforce_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t); template void nlm_bruteforce_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t); template void nlm_bruteforce_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t); } }}} ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing (fast approximate version) namespace cv { namespace gpu { namespace device { namespace imgproc { template <int cn> struct Unroll; template <> struct Unroll<1> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&> tie(float& val1, float& val2) { return thrust::tie(val1, val2); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op); } }; template <> struct Unroll<2> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, 
volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&> tie(float& val1, float2& val2) { return thrust::tie(val1, val2.x, val2.y); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op); } }; template <> struct Unroll<3> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&> tie(float& val1, float3& val2) { return thrust::tie(val1, val2.x, val2.y, val2.z); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op, op); } }; template <> struct Unroll<4> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE, smem + 4 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&, float&> tie(float& val1, float4& val2) { return thrust::tie(val1, val2.x, val2.y, val2.z, val2.w); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op, op, op); } }; __device__ __forceinline__ int calcDist(const uchar& a, const uchar& b) { return (a-b)*(a-b); } __device__ __forceinline__ int calcDist(const uchar2& a, const uchar2& 
b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y); } __device__ __forceinline__ int calcDist(const uchar3& a, const uchar3& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z); } template <class T> struct FastNonLocalMeans { enum { CTA_SIZE = 128, TILE_COLS = 128, TILE_ROWS = 32, STRIDE = CTA_SIZE }; struct plus { __device__ __forceinline__ float operator()(float v1, float v2) const { return v1 + v2; } }; int search_radius; int block_radius; int search_window; int block_window; float minus_h2_inv; FastNonLocalMeans(int search_window_, int block_window_, float h) : search_radius(search_window_/2), block_radius(block_window_/2), search_window(search_window_), block_window(block_window_), minus_h2_inv(-1.f/(h * h * VecTraits<T>::cn)) {} PtrStep<T> src; mutable PtrStepi buffer; __device__ __forceinline__ void initSums_BruteForce(int i, int j, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { dist_sums[index] = 0; for(int tx = 0; tx < block_window; ++tx) col_sums(tx, index) = 0; int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j; int by = i + y - search_radius; int bx = j + x - search_radius; #if 1 for (int tx = -block_radius; tx <= block_radius; ++tx) { int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sum += dist; } col_sums(tx + block_radius, index) = col_sum; } #else for (int ty = -block_radius; ty <= block_radius; ++ty) for (int tx = -block_radius; tx <= block_radius; ++tx) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sums(tx + block_radius, index) += dist; } #endif up_col_sums(j, index) = col_sums(block_window - 1, index); } } __device__ __forceinline__ void shiftRight_FirstRow(int i, int j, int first, int* dist_sums, 
PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j + block_radius; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) col_sum += calcDist(src(ay + ty, ax), src(by + ty, bx)); dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void shiftRight_UpSums(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { int ay = i; int ax = j + block_radius; T a_up = src(ay - block_radius - 1, ax); T a_down = src(ay + block_radius, ax); for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; T b_up = src(by - block_radius - 1, bx); T b_down = src(by + block_radius, bx); int col_sum = up_col_sums(j, index) + calcDist(a_down, b_down) - calcDist(a_up, b_up); dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void convolve_window(int i, int j, const int* dist_sums, T& dst) const { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_type; float weights_sum = 0; sum_type sum = VecTraits<sum_type>::all(0); float bw2_inv = 1.f/(block_window * block_window); int sx = j - search_radius; int sy = i - search_radius; for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; float avg_dist = dist_sums[index] * bw2_inv; float weight = __expf(avg_dist * minus_h2_inv); weights_sum += weight; sum = sum + weight * 
saturate_cast<sum_type>(src(sy + y, sx + x)); } __shared__ float cta_buffer[CTA_SIZE * (VecTraits<T>::cn + 1)]; reduce<CTA_SIZE>(Unroll<VecTraits<T>::cn>::template smem_tuple<CTA_SIZE>(cta_buffer), Unroll<VecTraits<T>::cn>::tie(weights_sum, sum), threadIdx.x, Unroll<VecTraits<T>::cn>::op()); if (threadIdx.x == 0) dst = saturate_cast<T>(sum / weights_sum); } __device__ __forceinline__ void operator()(PtrStepSz<T>& dst) const { int tbx = blockIdx.x * TILE_COLS; int tby = blockIdx.y * TILE_ROWS; int tex = ::min(tbx + TILE_COLS, dst.cols); int tey = ::min(tby + TILE_ROWS, dst.rows); PtrStepi col_sums; col_sums.data = buffer.ptr(dst.cols + blockIdx.x * block_window) + blockIdx.y * search_window * search_window; col_sums.step = buffer.step; PtrStepi up_col_sums; up_col_sums.data = buffer.data + blockIdx.y * search_window * search_window; up_col_sums.step = buffer.step; extern __shared__ int dist_sums[]; //search_window * search_window int first = 0; for (int i = tby; i < tey; ++i) for (int j = tbx; j < tex; ++j) { __syncthreads(); if (j == tbx) { initSums_BruteForce(i, j, dist_sums, col_sums, up_col_sums); first = 0; } else { if (i == tby) shiftRight_FirstRow(i, j, first, dist_sums, col_sums, up_col_sums); else shiftRight_UpSums(i, j, first, dist_sums, col_sums, up_col_sums); first = (first + 1) % block_window; } __syncthreads(); convolve_window(i, j, dist_sums, dst(i, j)); } } }; template<typename T> __global__ void fast_nlm_kernel(const FastNonLocalMeans<T> fnlm, PtrStepSz<T> dst) { fnlm(dst); } void nln_fast_get_buffer_size(const PtrStepSzb& src, int search_window, int block_window, int& buffer_cols, int& buffer_rows) { typedef FastNonLocalMeans<uchar> FNLM; dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); buffer_cols = search_window * search_window * grid.y; buffer_rows = src.cols + block_window * grid.x; } template<typename T> void nlm_fast_gpu(const PtrStepSzb& src, PtrStepSzb dst, PtrStepi buffer, int search_window, int block_window, 
float h, hipStream_t stream) { typedef FastNonLocalMeans<T> FNLM; FNLM fnlm(search_window, block_window, h); fnlm.src = (PtrStepSz<T>)src; fnlm.buffer = buffer; dim3 block(FNLM::CTA_SIZE, 1); dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); int smem = search_window * search_window * sizeof(int); hipLaunchKernelGGL(( fast_nlm_kernel), dim3(grid), dim3(block), smem, 0, fnlm, (PtrStepSz<T>)dst); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void nlm_fast_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t); template void nlm_fast_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t); template void nlm_fast_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t); __global__ void fnlm_split_kernel(const PtrStepSz<uchar3> lab, PtrStepb l, PtrStep<uchar2> ab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar3 p = lab(y, x); ab(y,x) = make_uchar2(p.y, p.z); l(y,x) = p.x; } } void fnlm_split_channels(const PtrStepSz<uchar3>& lab, PtrStepb l, PtrStep<uchar2> ab, hipStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); hipLaunchKernelGGL(( fnlm_split_kernel), dim3(g), dim3(b), 0, 0, lab, l, ab); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } __global__ void fnlm_merge_kernel(const PtrStepb l, const PtrStep<uchar2> ab, PtrStepSz<uchar3> lab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar2 p = ab(y, x); lab(y, x) = make_uchar3(l(y, x), p.x, p.y); } } void fnlm_merge_channels(const PtrStepb& l, const PtrStep<uchar2>& ab, PtrStepSz<uchar3> lab, hipStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); hipLaunchKernelGGL(( 
fnlm_merge_kernel), dim3(g), dim3(b), 0, 0, l, ab, lab); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } } }}} #endif /* CUDA_DISABLER */
8111a147e816d597ec732a35ac6913ce243c3209.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/reduce.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" using namespace cv::gpu; typedef unsigned char uchar; typedef unsigned short ushort; ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing namespace cv { namespace gpu { namespace device { namespace imgproc { __device__ __forceinline__ float norm2(const float& v) { return v*v; } __device__ __forceinline__ float norm2(const float2& v) { return v.x*v.x + v.y*v.y; } __device__ __forceinline__ float norm2(const float3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; } __device__ __forceinline__ float norm2(const float4& v) { return v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w; } template<typename T, typename B> __global__ void nlm_kernel(const PtrStep<T> src, PtrStepSz<T> dst, const B b, int search_radius, int block_radius, float noise_mult) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type; const int i = blockDim.y * blockIdx.y + threadIdx.y; const int j = blockDim.x * blockIdx.x + threadIdx.x; if (j >= dst.cols || i >= dst.rows) return; int bsize = search_radius + block_radius; int search_window = 2 * search_radius + 1; float minus_search_window2_inv = -1.f/(search_window * 
search_window); value_type sum1 = VecTraits<value_type>::all(0); float sum2 = 0.f; if (j - bsize >= 0 && j + bsize < dst.cols && i - bsize >= 0 && i + bsize < dst.rows) { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(src(i + y + ty, j + x + tx)); value_type av = saturate_cast<value_type>(src(i + ty, j + tx)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); /*if (i == 255 && j == 255) printf("%f %f\n", w, dist2 * minus_h2_inv + (x * x + y * y) * minus_search_window2_inv);*/ sum1 = sum1 + w * saturate_cast<value_type>(src(i + y, j + x)); sum2 += w; } } else { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(b.at(i + y + ty, j + x + tx, src)); value_type av = saturate_cast<value_type>(b.at(i + ty, j + tx, src)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); sum1 = sum1 + w * saturate_cast<value_type>(b.at(i + y, j + x, src)); sum2 += w; } } dst(i, j) = saturate_cast<T>(sum1 / sum2); } template<typename T, template <typename> class B> void nlm_caller(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream) { dim3 block (32, 8); dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y)); B<T> b(src.rows, src.cols); int block_window = 2 * block_radius + 1; float minus_h2_inv = -1.f/(h * h * VecTraits<T>::cn); float noise_mult = minus_h2_inv/(block_window * block_window); cudaSafeCall( cudaFuncSetCacheConfig (nlm_kernel<T, B<T> 
>, cudaFuncCachePreferL1) ); nlm_kernel<<<grid, block>>>((PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, search_radius, block_radius, noise_mult); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template<typename T> void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, cudaStream_t stream) { typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream); static func_t funcs[] = { nlm_caller<T, BrdReflect101>, nlm_caller<T, BrdReplicate>, nlm_caller<T, BrdConstant>, nlm_caller<T, BrdReflect>, nlm_caller<T, BrdWrap>, }; funcs[borderMode](src, dst, search_radius, block_radius, h, stream); } template void nlm_bruteforce_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t); template void nlm_bruteforce_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t); template void nlm_bruteforce_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t); } }}} ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing (fast approximate version) namespace cv { namespace gpu { namespace device { namespace imgproc { template <int cn> struct Unroll; template <> struct Unroll<1> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&> tie(float& val1, float& val2) { return thrust::tie(val1, val2); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op); } }; template <> struct Unroll<2> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile 
float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&> tie(float& val1, float2& val2) { return thrust::tie(val1, val2.x, val2.y); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op); } }; template <> struct Unroll<3> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&> tie(float& val1, float3& val2) { return thrust::tie(val1, val2.x, val2.y, val2.z); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op, op); } }; template <> struct Unroll<4> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE, smem + 4 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&, float&> tie(float& val1, float4& val2) { return thrust::tie(val1, val2.x, val2.y, val2.z, val2.w); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op, op, op); } }; __device__ __forceinline__ int calcDist(const uchar& a, const uchar& b) { return (a-b)*(a-b); } __device__ __forceinline__ int calcDist(const uchar2& a, const uchar2& b) { return 
(a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y); } __device__ __forceinline__ int calcDist(const uchar3& a, const uchar3& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z); } template <class T> struct FastNonLocalMeans { enum { CTA_SIZE = 128, TILE_COLS = 128, TILE_ROWS = 32, STRIDE = CTA_SIZE }; struct plus { __device__ __forceinline__ float operator()(float v1, float v2) const { return v1 + v2; } }; int search_radius; int block_radius; int search_window; int block_window; float minus_h2_inv; FastNonLocalMeans(int search_window_, int block_window_, float h) : search_radius(search_window_/2), block_radius(block_window_/2), search_window(search_window_), block_window(block_window_), minus_h2_inv(-1.f/(h * h * VecTraits<T>::cn)) {} PtrStep<T> src; mutable PtrStepi buffer; __device__ __forceinline__ void initSums_BruteForce(int i, int j, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { dist_sums[index] = 0; for(int tx = 0; tx < block_window; ++tx) col_sums(tx, index) = 0; int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j; int by = i + y - search_radius; int bx = j + x - search_radius; #if 1 for (int tx = -block_radius; tx <= block_radius; ++tx) { int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sum += dist; } col_sums(tx + block_radius, index) = col_sum; } #else for (int ty = -block_radius; ty <= block_radius; ++ty) for (int tx = -block_radius; tx <= block_radius; ++tx) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sums(tx + block_radius, index) += dist; } #endif up_col_sums(j, index) = col_sums(block_window - 1, index); } } __device__ __forceinline__ void shiftRight_FirstRow(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, 
PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j + block_radius; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) col_sum += calcDist(src(ay + ty, ax), src(by + ty, bx)); dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void shiftRight_UpSums(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { int ay = i; int ax = j + block_radius; T a_up = src(ay - block_radius - 1, ax); T a_down = src(ay + block_radius, ax); for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; T b_up = src(by - block_radius - 1, bx); T b_down = src(by + block_radius, bx); int col_sum = up_col_sums(j, index) + calcDist(a_down, b_down) - calcDist(a_up, b_up); dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void convolve_window(int i, int j, const int* dist_sums, T& dst) const { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_type; float weights_sum = 0; sum_type sum = VecTraits<sum_type>::all(0); float bw2_inv = 1.f/(block_window * block_window); int sx = j - search_radius; int sy = i - search_radius; for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; float avg_dist = dist_sums[index] * bw2_inv; float weight = __expf(avg_dist * minus_h2_inv); weights_sum += weight; sum = sum + weight * 
saturate_cast<sum_type>(src(sy + y, sx + x)); } __shared__ float cta_buffer[CTA_SIZE * (VecTraits<T>::cn + 1)]; reduce<CTA_SIZE>(Unroll<VecTraits<T>::cn>::template smem_tuple<CTA_SIZE>(cta_buffer), Unroll<VecTraits<T>::cn>::tie(weights_sum, sum), threadIdx.x, Unroll<VecTraits<T>::cn>::op()); if (threadIdx.x == 0) dst = saturate_cast<T>(sum / weights_sum); } __device__ __forceinline__ void operator()(PtrStepSz<T>& dst) const { int tbx = blockIdx.x * TILE_COLS; int tby = blockIdx.y * TILE_ROWS; int tex = ::min(tbx + TILE_COLS, dst.cols); int tey = ::min(tby + TILE_ROWS, dst.rows); PtrStepi col_sums; col_sums.data = buffer.ptr(dst.cols + blockIdx.x * block_window) + blockIdx.y * search_window * search_window; col_sums.step = buffer.step; PtrStepi up_col_sums; up_col_sums.data = buffer.data + blockIdx.y * search_window * search_window; up_col_sums.step = buffer.step; extern __shared__ int dist_sums[]; //search_window * search_window int first = 0; for (int i = tby; i < tey; ++i) for (int j = tbx; j < tex; ++j) { __syncthreads(); if (j == tbx) { initSums_BruteForce(i, j, dist_sums, col_sums, up_col_sums); first = 0; } else { if (i == tby) shiftRight_FirstRow(i, j, first, dist_sums, col_sums, up_col_sums); else shiftRight_UpSums(i, j, first, dist_sums, col_sums, up_col_sums); first = (first + 1) % block_window; } __syncthreads(); convolve_window(i, j, dist_sums, dst(i, j)); } } }; template<typename T> __global__ void fast_nlm_kernel(const FastNonLocalMeans<T> fnlm, PtrStepSz<T> dst) { fnlm(dst); } void nln_fast_get_buffer_size(const PtrStepSzb& src, int search_window, int block_window, int& buffer_cols, int& buffer_rows) { typedef FastNonLocalMeans<uchar> FNLM; dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); buffer_cols = search_window * search_window * grid.y; buffer_rows = src.cols + block_window * grid.x; } template<typename T> void nlm_fast_gpu(const PtrStepSzb& src, PtrStepSzb dst, PtrStepi buffer, int search_window, int block_window, 
float h, cudaStream_t stream) { typedef FastNonLocalMeans<T> FNLM; FNLM fnlm(search_window, block_window, h); fnlm.src = (PtrStepSz<T>)src; fnlm.buffer = buffer; dim3 block(FNLM::CTA_SIZE, 1); dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); int smem = search_window * search_window * sizeof(int); fast_nlm_kernel<<<grid, block, smem>>>(fnlm, (PtrStepSz<T>)dst); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void nlm_fast_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t); template void nlm_fast_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t); template void nlm_fast_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t); __global__ void fnlm_split_kernel(const PtrStepSz<uchar3> lab, PtrStepb l, PtrStep<uchar2> ab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar3 p = lab(y, x); ab(y,x) = make_uchar2(p.y, p.z); l(y,x) = p.x; } } void fnlm_split_channels(const PtrStepSz<uchar3>& lab, PtrStepb l, PtrStep<uchar2> ab, cudaStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); fnlm_split_kernel<<<g, b>>>(lab, l, ab); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void fnlm_merge_kernel(const PtrStepb l, const PtrStep<uchar2> ab, PtrStepSz<uchar3> lab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar2 p = ab(y, x); lab(y, x) = make_uchar3(l(y, x), p.x, p.y); } } void fnlm_merge_channels(const PtrStepb& l, const PtrStep<uchar2>& ab, PtrStepSz<uchar3> lab, cudaStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); fnlm_merge_kernel<<<g, b>>>(l, ab, lab); cudaSafeCall ( cudaGetLastError () ); if 
(stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } } }}} #endif /* CUDA_DISABLER */
7cbd723ea04194c3bd3d0b3b7b554628e624a6f4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } __global__ void xsigny_update(const int n, const double *a, double *b, double *c) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<n) { if (b[i]>0) {c[i]+=a[i];} else {if (b[i]<0) {c[i]-=a[i];} } } }
7cbd723ea04194c3bd3d0b3b7b554628e624a6f4.cu
#include "includes.h" extern "C" { } __global__ void xsigny_update(const int n, const double *a, double *b, double *c) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<n) { if (b[i]>0) {c[i]+=a[i];} else {if (b[i]<0) {c[i]-=a[i];} } } }
4c94a3093346ebb43a297f640f16e7eeef3fa1a8.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/quantized/fake_quant_affine.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <thrust/tuple.h> #include <cmath> /* Fake quantize a tensor Args: output: output tensor. input : input tensor. sc: scale to quantize the input tensor to zero_point: zero_point quant_min: minimum quantized value quant_max: maximum quantized value Returns: Fake quantized tensor (float dtype). */ namespace at { namespace native { void fake_quantize_tensor_kernel_cuda( Tensor& output, const Tensor& input, float scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { // scalar type of this function is guaranteed to be float float inv_scale = 1.0f / scale; auto iter = TensorIteratorConfig() .check_all_same_dtype(false) .add_output(output) .add_input(input) .build(); gpu_kernel(iter, [=] GPU_LAMBDA(float input_val) -> float { return (fminf( quant_max, fmaxf( quant_min, static_cast<int64_t>( std::nearbyint(input_val * inv_scale) + zero_point))) - zero_point) * scale; }); } void fake_quantize_grad_tensor_kernel_cuda( Tensor& input_grad, const Tensor& input, const Tensor& output_grad, float scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { // scalar type of this function is guaranteed to be float float inv_scale = 1.0f / scale; auto iter = TensorIteratorConfig() .check_all_same_dtype(false) .add_output(input_grad) .add_input(output_grad) .add_input(input) .build(); gpu_kernel(iter, [=] GPU_LAMBDA(float dy, float x) -> float { int64_t Xq = std::nearbyint(x * inv_scale) + zero_point; return (Xq >= quant_min && Xq <= quant_max) * dy; }); } void _fake_quantize_grad_learnable_tensor_kernel_cuda( TensorIterator& iter, float scale, float inv_scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { float dscale_small = quant_min - zero_point; float 
dscale_big = quant_max - zero_point; gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (float XInput, float dYInput) -> thrust::tuple<float, float, float> { float dXOutput, dZeroPointOutput, dScaleOutput; int64_t xq = std::nearbyint(XInput * inv_scale) + zero_point; dXOutput = dYInput * (xq >= quant_min && xq <= quant_max); xq = ::max(::min(xq, quant_max), quant_min); float xfq = static_cast<float>((xq - zero_point) * scale); if (xq == quant_min || xq == quant_max) { dZeroPointOutput = (dYInput) * (-1) * scale; dScaleOutput = (xq == quant_min) ? (dYInput * dscale_small) : (dYInput * dscale_big); } else { dZeroPointOutput = 0; dScaleOutput = (dYInput) * (xfq - (XInput)) * inv_scale; } return {dXOutput, dScaleOutput, dZeroPointOutput}; }); } REGISTER_DISPATCH(fake_quant_tensor_stub, &fake_quantize_tensor_kernel_cuda); REGISTER_DISPATCH(fake_quant_grad_tensor_stub, &fake_quantize_grad_tensor_kernel_cuda); REGISTER_DISPATCH(fake_quant_grad_learnable_tensor_stub, &_fake_quantize_grad_learnable_tensor_kernel_cuda); // Fake quantize per channel void fake_quant_per_channel_cuda(TensorIterator &iter, int64_t quant_min, int64_t quant_max) { gpu_kernel(iter, [=] GPU_LAMBDA (float input_val, float scale, int64_t zero_point) -> float { float inv_scale = 1.0f / scale; return (fminf( quant_max, fmaxf( quant_min, static_cast<int64_t>( std::nearbyint(input_val * inv_scale) + zero_point))) - zero_point) * scale; }); } void fake_quant_grad_per_channel_cuda(TensorIterator &iter, int64_t quant_min, int64_t quant_max) { gpu_kernel(iter, [=] GPU_LAMBDA (float x, float dy, float scale, int64_t zero_point) -> float { float inv_scale = 1.0f / scale; int64_t Xq = std::nearbyint(x * inv_scale) + zero_point; return (Xq >= quant_min && Xq <= quant_max) * dy; }); } void _fake_quantize_grad_learnable_channel_kernel_cuda(TensorIterator &iter, int64_t quant_min, int64_t quant_max) { gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (float x_input, float dy_input, float scale_input, float 
zero_point_input) -> thrust::tuple<float, float, float> { float dx_output, dscale_output, dzero_point_output; float inv_scale = 1.0f / scale_input; float dscale_small = quant_min - zero_point_input; float dscale_big = quant_max - zero_point_input; // Calculate gradients for X. int64_t xqi = std::nearbyint(zero_point_input + x_input * inv_scale); dx_output = dy_input * (xqi >= quant_min && xqi <= quant_max); // Calculate gradients for scale and zero point. xqi = ::max(::min(xqi, quant_max), quant_min); float xfqi = static_cast<float>((xqi - zero_point_input) * scale_input); if (xqi == quant_min || xqi == quant_max) { dzero_point_output = dy_input * (-1) * scale_input; dscale_output = (xqi == quant_min) ? (dy_input * dscale_small) : (dy_input * dscale_big); } else { dzero_point_output = 0; dscale_output = dy_input * (xfqi - x_input) * inv_scale; } return {dx_output, dscale_output, dzero_point_output}; }); } REGISTER_DISPATCH(fake_quant_per_channel_stub, &fake_quant_per_channel_cuda); REGISTER_DISPATCH(fake_quant_grad_per_channel_stub, &fake_quant_grad_per_channel_cuda); REGISTER_DISPATCH(fake_quant_grad_learnable_channel_stub, &_fake_quantize_grad_learnable_channel_kernel_cuda); } // namespace native } // namespace at
4c94a3093346ebb43a297f640f16e7eeef3fa1a8.cu
#include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/quantized/fake_quant_affine.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <thrust/tuple.h> #include <cmath> /* Fake quantize a tensor Args: output: output tensor. input : input tensor. sc: scale to quantize the input tensor to zero_point: zero_point quant_min: minimum quantized value quant_max: maximum quantized value Returns: Fake quantized tensor (float dtype). */ namespace at { namespace native { void fake_quantize_tensor_kernel_cuda( Tensor& output, const Tensor& input, float scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { // scalar type of this function is guaranteed to be float float inv_scale = 1.0f / scale; auto iter = TensorIteratorConfig() .check_all_same_dtype(false) .add_output(output) .add_input(input) .build(); gpu_kernel(iter, [=] GPU_LAMBDA(float input_val) -> float { return (fminf( quant_max, fmaxf( quant_min, static_cast<int64_t>( std::nearbyint(input_val * inv_scale) + zero_point))) - zero_point) * scale; }); } void fake_quantize_grad_tensor_kernel_cuda( Tensor& input_grad, const Tensor& input, const Tensor& output_grad, float scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { // scalar type of this function is guaranteed to be float float inv_scale = 1.0f / scale; auto iter = TensorIteratorConfig() .check_all_same_dtype(false) .add_output(input_grad) .add_input(output_grad) .add_input(input) .build(); gpu_kernel(iter, [=] GPU_LAMBDA(float dy, float x) -> float { int64_t Xq = std::nearbyint(x * inv_scale) + zero_point; return (Xq >= quant_min && Xq <= quant_max) * dy; }); } void _fake_quantize_grad_learnable_tensor_kernel_cuda( TensorIterator& iter, float scale, float inv_scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { float dscale_small = quant_min - zero_point; float dscale_big = quant_max - zero_point; gpu_kernel_multiple_outputs( 
iter, [=] GPU_LAMBDA (float XInput, float dYInput) -> thrust::tuple<float, float, float> { float dXOutput, dZeroPointOutput, dScaleOutput; int64_t xq = std::nearbyint(XInput * inv_scale) + zero_point; dXOutput = dYInput * (xq >= quant_min && xq <= quant_max); xq = std::max(std::min(xq, quant_max), quant_min); float xfq = static_cast<float>((xq - zero_point) * scale); if (xq == quant_min || xq == quant_max) { dZeroPointOutput = (dYInput) * (-1) * scale; dScaleOutput = (xq == quant_min) ? (dYInput * dscale_small) : (dYInput * dscale_big); } else { dZeroPointOutput = 0; dScaleOutput = (dYInput) * (xfq - (XInput)) * inv_scale; } return {dXOutput, dScaleOutput, dZeroPointOutput}; }); } REGISTER_DISPATCH(fake_quant_tensor_stub, &fake_quantize_tensor_kernel_cuda); REGISTER_DISPATCH(fake_quant_grad_tensor_stub, &fake_quantize_grad_tensor_kernel_cuda); REGISTER_DISPATCH(fake_quant_grad_learnable_tensor_stub, &_fake_quantize_grad_learnable_tensor_kernel_cuda); // Fake quantize per channel void fake_quant_per_channel_cuda(TensorIterator &iter, int64_t quant_min, int64_t quant_max) { gpu_kernel(iter, [=] GPU_LAMBDA (float input_val, float scale, int64_t zero_point) -> float { float inv_scale = 1.0f / scale; return (fminf( quant_max, fmaxf( quant_min, static_cast<int64_t>( std::nearbyint(input_val * inv_scale) + zero_point))) - zero_point) * scale; }); } void fake_quant_grad_per_channel_cuda(TensorIterator &iter, int64_t quant_min, int64_t quant_max) { gpu_kernel(iter, [=] GPU_LAMBDA (float x, float dy, float scale, int64_t zero_point) -> float { float inv_scale = 1.0f / scale; int64_t Xq = std::nearbyint(x * inv_scale) + zero_point; return (Xq >= quant_min && Xq <= quant_max) * dy; }); } void _fake_quantize_grad_learnable_channel_kernel_cuda(TensorIterator &iter, int64_t quant_min, int64_t quant_max) { gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (float x_input, float dy_input, float scale_input, float zero_point_input) -> thrust::tuple<float, float, float> { float 
dx_output, dscale_output, dzero_point_output; float inv_scale = 1.0f / scale_input; float dscale_small = quant_min - zero_point_input; float dscale_big = quant_max - zero_point_input; // Calculate gradients for X. int64_t xqi = std::nearbyint(zero_point_input + x_input * inv_scale); dx_output = dy_input * (xqi >= quant_min && xqi <= quant_max); // Calculate gradients for scale and zero point. xqi = std::max(std::min(xqi, quant_max), quant_min); float xfqi = static_cast<float>((xqi - zero_point_input) * scale_input); if (xqi == quant_min || xqi == quant_max) { dzero_point_output = dy_input * (-1) * scale_input; dscale_output = (xqi == quant_min) ? (dy_input * dscale_small) : (dy_input * dscale_big); } else { dzero_point_output = 0; dscale_output = dy_input * (xfqi - x_input) * inv_scale; } return {dx_output, dscale_output, dzero_point_output}; }); } REGISTER_DISPATCH(fake_quant_per_channel_stub, &fake_quant_per_channel_cuda); REGISTER_DISPATCH(fake_quant_grad_per_channel_stub, &fake_quant_grad_per_channel_cuda); REGISTER_DISPATCH(fake_quant_grad_learnable_channel_stub, &_fake_quantize_grad_learnable_channel_kernel_cuda); } // namespace native } // namespace at
40a1af20153187495ba9950e0fbd0e6b8fc409ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <chrono> #include <cstdint> #include <cstdlib> #include <fstream> #include <iostream> const int THREADS_PER_BLOCK = 512; __global__ void Check(const int n, const int m, const int startLevel, const int* exprVar, const int* exprNeg, int* set, int* flags, const int q) { int e = blockIdx.x * blockDim.x + threadIdx.x; if (e >= q) { return; } for (int i = 0; i < startLevel; ++i) { if (((uint32_t)e) & (((uint32_t)1) << ((uint32_t)i))) { set[e * n + i] = 1; } else { set[e * n + i] = 0; } } for (int i = 0; i < m; ++i) { int disjunctRes = 0; for (int j = 0; j < 3; ++j) { int index = exprVar[i * 3 + j]; if (index >= startLevel) { disjunctRes = -1; } else { int elem = set[e * n + index]; elem ^= exprNeg[i * 3 + j]; if (elem == 1) { disjunctRes = 1; break; } } } if (disjunctRes == 0) { flags[e] = 0; break; } if (disjunctRes == -1) { flags[e] = -1; } } } __global__ void DFS(const int n, const int m, const int startLevel, const int* exprVar, const int* exprNeg, int* set, int* flags, int* isFound, const int q) { int e = blockIdx.x * blockDim.x + threadIdx.x; if (e >= q) { return; } for (int k = startLevel; k >= startLevel;) { if (k == n) { --k; } else if (set[e * n + k] != 0) { set[e * n + k] = (set[e * n + k] == -1 ? 
1 : 0); flags[e] = 1; for (int i = 0; i < m; ++i) { int disjunctRes = 0; for (int j = 0; j < 3; ++j) { int index = exprVar[i * 3 + j]; if (index > k) { disjunctRes = -1; } else { int elem = set[e * n + index]; elem ^= exprNeg[i * 3 + j]; if (elem == 1) { disjunctRes = 1; break; } } } if (disjunctRes == 0) { flags[e] = 0; break; } if (disjunctRes == -1) { flags[e] = -1; } } atomicMax(isFound, flags[e]); if (*isFound == 1) { return; } if (flags[e] == 0) { continue; } ++k; } else { set[e * n + k] = -1; --k; } } } int main(int argc, char* argv[]) { std::chrono::high_resolution_clock::time_point totalStart = std::chrono::high_resolution_clock::now(); if (argc != 4) { std::cerr << "Usage: " << argv[0] << " input_file output_file precalc_depth" << std::endl; return 0; } std::ifstream fin(argv[1]); std::ofstream fout(argv[2]); int n, m; fin >> n >> m; int* exprVar = (int*)malloc(3 * m * sizeof(*exprVar)); int* exprNeg = (int*)malloc(3 * m * sizeof(*exprNeg)); int* cudaExprVar = nullptr; int* cudaExprNeg = nullptr; for (int i = 0; i < m; ++i) { fin >> exprVar[3 * i] >> exprNeg[3 * i] >> exprVar[3 * i + 1] >> exprNeg[3 * i + 1] >> exprVar[3 * i + 2] >> exprNeg[3 * i + 2]; --exprVar[3 * i]; --exprVar[3 * i + 1]; --exprVar[3 * i + 2]; } int startLevel = ::min(n, atoi(argv[3])); int q = (1 << startLevel); int* set = (int*)calloc(q * n, sizeof(*set)); int* cudaSet = nullptr; for (int i = 0; i < q * n; ++i) { set[i] = -1; } int* flags = (int*)calloc(q, sizeof(*flags)); for (int i = 0; i < q; ++i) { flags[i] = 1; } int* cudaFlags = nullptr; bool isSolution = false; hipMalloc(&cudaExprVar, 3 * m * sizeof(*exprVar)); hipMalloc(&cudaExprNeg, 3 * m * sizeof(*exprNeg)); hipMalloc(&cudaSet, q * n * sizeof(*set)); hipMalloc(&cudaFlags, q * sizeof(*flags)); hipMemcpy(cudaExprVar, exprVar, 3 * m * sizeof(*exprVar), hipMemcpyHostToDevice); hipMemcpy(cudaExprNeg, exprNeg, 3 * m * sizeof(*exprNeg), hipMemcpyHostToDevice); hipMemcpy(cudaSet, set, q * n * sizeof(*set), hipMemcpyHostToDevice); 
hipMemcpy(cudaFlags, flags, q * sizeof(*flags), hipMemcpyHostToDevice); int qBlock = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( Check), dim3(qBlock), dim3(THREADS_PER_BLOCK), 0, 0, n, m, startLevel, cudaExprVar, cudaExprNeg, cudaSet, cudaFlags, q); hipMemcpy(set, cudaSet, q * n * sizeof(*set), hipMemcpyDeviceToHost); hipMemcpy(flags, cudaFlags, q * sizeof(*flags), hipMemcpyDeviceToHost); for (int i = 0, j = q - 1;;) { while (i < q && flags[i] != 0) { ++i; } while (j >= 0 && flags[j] == 0) { --j; } if (i >= j) { q = i; break; } memcpy(set + i * n, set + j * n, n * sizeof(*set)); std::swap(flags[i], flags[j]); } int* isFound = nullptr; hipMalloc(&isFound, sizeof(*isFound)); hipMemset(isFound, 0, sizeof(*isFound)); if (q > 0) { hipMemcpy(cudaSet, set, q * n * sizeof(*set), hipMemcpyHostToDevice); hipMemcpy(cudaFlags, flags, q * sizeof(*flags), hipMemcpyHostToDevice); qBlock = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( DFS), dim3(qBlock), dim3(THREADS_PER_BLOCK), 0, 0, n, m, startLevel, cudaExprVar, cudaExprNeg, cudaSet, cudaFlags, isFound, q); hipMemcpy(set, cudaSet, q * n * sizeof(*set), hipMemcpyDeviceToHost); hipMemcpy(flags, cudaFlags, q * sizeof(*flags), hipMemcpyDeviceToHost); } for (int e = 0; e < q; ++e) { if (flags[e] == 1) { for (int i = 0; i < n; ++i) { fout << "x_" << i + 1 << " = " << (set[e * n + i] == 1 ? 1 : 0) << std::endl; } isSolution = true; break; } } if (!isSolution) { fout << "No solution" << std::endl; } free(exprVar); free(exprNeg); free(set); free(flags); hipFree(cudaExprVar); hipFree(cudaExprNeg); hipFree(cudaSet); hipFree(cudaFlags); hipFree(isFound); std::chrono::high_resolution_clock::time_point totalEnd = std::chrono::high_resolution_clock::now(); double totalTime = std::chrono::duration_cast<std::chrono::duration<double>>(totalEnd - totalStart).count(); std::cout << "Total time: " << totalTime << std::endl; return 0; }
40a1af20153187495ba9950e0fbd0e6b8fc409ec.cu
#include <algorithm> #include <chrono> #include <cstdint> #include <cstdlib> #include <fstream> #include <iostream> const int THREADS_PER_BLOCK = 512; __global__ void Check(const int n, const int m, const int startLevel, const int* exprVar, const int* exprNeg, int* set, int* flags, const int q) { int e = blockIdx.x * blockDim.x + threadIdx.x; if (e >= q) { return; } for (int i = 0; i < startLevel; ++i) { if (((uint32_t)e) & (((uint32_t)1) << ((uint32_t)i))) { set[e * n + i] = 1; } else { set[e * n + i] = 0; } } for (int i = 0; i < m; ++i) { int disjunctRes = 0; for (int j = 0; j < 3; ++j) { int index = exprVar[i * 3 + j]; if (index >= startLevel) { disjunctRes = -1; } else { int elem = set[e * n + index]; elem ^= exprNeg[i * 3 + j]; if (elem == 1) { disjunctRes = 1; break; } } } if (disjunctRes == 0) { flags[e] = 0; break; } if (disjunctRes == -1) { flags[e] = -1; } } } __global__ void DFS(const int n, const int m, const int startLevel, const int* exprVar, const int* exprNeg, int* set, int* flags, int* isFound, const int q) { int e = blockIdx.x * blockDim.x + threadIdx.x; if (e >= q) { return; } for (int k = startLevel; k >= startLevel;) { if (k == n) { --k; } else if (set[e * n + k] != 0) { set[e * n + k] = (set[e * n + k] == -1 ? 
1 : 0); flags[e] = 1; for (int i = 0; i < m; ++i) { int disjunctRes = 0; for (int j = 0; j < 3; ++j) { int index = exprVar[i * 3 + j]; if (index > k) { disjunctRes = -1; } else { int elem = set[e * n + index]; elem ^= exprNeg[i * 3 + j]; if (elem == 1) { disjunctRes = 1; break; } } } if (disjunctRes == 0) { flags[e] = 0; break; } if (disjunctRes == -1) { flags[e] = -1; } } atomicMax(isFound, flags[e]); if (*isFound == 1) { return; } if (flags[e] == 0) { continue; } ++k; } else { set[e * n + k] = -1; --k; } } } int main(int argc, char* argv[]) { std::chrono::high_resolution_clock::time_point totalStart = std::chrono::high_resolution_clock::now(); if (argc != 4) { std::cerr << "Usage: " << argv[0] << " input_file output_file precalc_depth" << std::endl; return 0; } std::ifstream fin(argv[1]); std::ofstream fout(argv[2]); int n, m; fin >> n >> m; int* exprVar = (int*)malloc(3 * m * sizeof(*exprVar)); int* exprNeg = (int*)malloc(3 * m * sizeof(*exprNeg)); int* cudaExprVar = nullptr; int* cudaExprNeg = nullptr; for (int i = 0; i < m; ++i) { fin >> exprVar[3 * i] >> exprNeg[3 * i] >> exprVar[3 * i + 1] >> exprNeg[3 * i + 1] >> exprVar[3 * i + 2] >> exprNeg[3 * i + 2]; --exprVar[3 * i]; --exprVar[3 * i + 1]; --exprVar[3 * i + 2]; } int startLevel = std::min(n, atoi(argv[3])); int q = (1 << startLevel); int* set = (int*)calloc(q * n, sizeof(*set)); int* cudaSet = nullptr; for (int i = 0; i < q * n; ++i) { set[i] = -1; } int* flags = (int*)calloc(q, sizeof(*flags)); for (int i = 0; i < q; ++i) { flags[i] = 1; } int* cudaFlags = nullptr; bool isSolution = false; cudaMalloc(&cudaExprVar, 3 * m * sizeof(*exprVar)); cudaMalloc(&cudaExprNeg, 3 * m * sizeof(*exprNeg)); cudaMalloc(&cudaSet, q * n * sizeof(*set)); cudaMalloc(&cudaFlags, q * sizeof(*flags)); cudaMemcpy(cudaExprVar, exprVar, 3 * m * sizeof(*exprVar), cudaMemcpyHostToDevice); cudaMemcpy(cudaExprNeg, exprNeg, 3 * m * sizeof(*exprNeg), cudaMemcpyHostToDevice); cudaMemcpy(cudaSet, set, q * n * sizeof(*set), 
cudaMemcpyHostToDevice); cudaMemcpy(cudaFlags, flags, q * sizeof(*flags), cudaMemcpyHostToDevice); int qBlock = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; Check<<<qBlock, THREADS_PER_BLOCK>>>(n, m, startLevel, cudaExprVar, cudaExprNeg, cudaSet, cudaFlags, q); cudaMemcpy(set, cudaSet, q * n * sizeof(*set), cudaMemcpyDeviceToHost); cudaMemcpy(flags, cudaFlags, q * sizeof(*flags), cudaMemcpyDeviceToHost); for (int i = 0, j = q - 1;;) { while (i < q && flags[i] != 0) { ++i; } while (j >= 0 && flags[j] == 0) { --j; } if (i >= j) { q = i; break; } memcpy(set + i * n, set + j * n, n * sizeof(*set)); std::swap(flags[i], flags[j]); } int* isFound = nullptr; cudaMalloc(&isFound, sizeof(*isFound)); cudaMemset(isFound, 0, sizeof(*isFound)); if (q > 0) { cudaMemcpy(cudaSet, set, q * n * sizeof(*set), cudaMemcpyHostToDevice); cudaMemcpy(cudaFlags, flags, q * sizeof(*flags), cudaMemcpyHostToDevice); qBlock = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; DFS<<<qBlock, THREADS_PER_BLOCK>>>(n, m, startLevel, cudaExprVar, cudaExprNeg, cudaSet, cudaFlags, isFound, q); cudaMemcpy(set, cudaSet, q * n * sizeof(*set), cudaMemcpyDeviceToHost); cudaMemcpy(flags, cudaFlags, q * sizeof(*flags), cudaMemcpyDeviceToHost); } for (int e = 0; e < q; ++e) { if (flags[e] == 1) { for (int i = 0; i < n; ++i) { fout << "x_" << i + 1 << " = " << (set[e * n + i] == 1 ? 1 : 0) << std::endl; } isSolution = true; break; } } if (!isSolution) { fout << "No solution" << std::endl; } free(exprVar); free(exprNeg); free(set); free(flags); cudaFree(cudaExprVar); cudaFree(cudaExprNeg); cudaFree(cudaSet); cudaFree(cudaFlags); cudaFree(isFound); std::chrono::high_resolution_clock::time_point totalEnd = std::chrono::high_resolution_clock::now(); double totalTime = std::chrono::duration_cast<std::chrono::duration<double>>(totalEnd - totalStart).count(); std::cout << "Total time: " << totalTime << std::endl; return 0; }
bfa4e05d3faa7627425bfa25910925bffbe7c4f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cstdio> #include<algorithm> #include<climits> extern "C" { __global__ void initialize(int const *capacity, int *flow, int *excess, int *height, int const s_x, int const s_y, int const N) { // count coords int const x = (blockIdx.x * 32) + threadIdx.x; int const y = (blockIdx.y * 32) + threadIdx.y; if (x>=N || y>=N) return; int u = y*N + x; if (x == s_x && y == s_y) { height[u] = N; // h(s) <- |V| excess[u] = INT_MAX/4; //printf("START HEIGHT SET TO %d %d\n", height[y*N+x], excess[y*N+x]); } else { height[u] = 0; // h(u) <- 0 excess[u] = 0; //Dla dowolnego wierzchoka (x,y) przepustowoci krawdzi prowadzcych do jego ssiadw wynosz: capacity[4(Ny+x)] (krawd w gr), [...+1] (krawd w prawo), [...+2] (krawd w d), [...3] (krawd w lewo). int edge = 4*u; for (int i = 0; i < 4; ++i) { flow[edge] = capacity[edge]; // Cf(u, v) <- Cuv edge++; } } __syncthreads(); if (x == s_x && y == s_y) { int const vertex_x[] = {0, 1, 0, -1}; int const vertex_y[] = {-1, 0, 1, 0}; int edge = 4*u; for (int i = 0; i < 4; ++i) { flow[edge] = 0; // Cf(s, u) <- 0 int const tmpx = x+vertex_x[i]; int const tmpy = y+vertex_y[i]; if(tmpx >= 0 && tmpx < N && tmpy>=0 && tmpy < N) { excess[N*tmpy+tmpx] = capacity[edge]; // e(u) = C(s, u) flow[4*(N*tmpy+tmpx)+((i+2)%4)] += capacity[edge];// + capacity[4*(N*tmpy+tmpx)+(i+2)%4]; } edge++; /* (x, y-1) (x+1, y) (x, y+1) (x-1, y) */ } } return; } __global__ void push_relabel(int *excess, int *height, int *flow, int const N, int const s_x, int const s_y, int const t_x, int const t_y) { // count coords int const x = (blockIdx.x * 32) + threadIdx.x; int const y = (blockIdx.y * 32) + threadIdx.y; if (x>=N || y>=N) return; if (x == s_x && y == s_y) return; if (x == t_x && y == t_y) return; if(excess[N*y+x]>0){ int u = N*y + x; int temp_e = excess[u]; int temp_v_x = -1, temp_v_y = -1; int temp_h = INT_MAX/2; int temp_v_it = -1; int const vertex_x[] = {0, 1, 0, -1}; int const 
vertex_y[] = {-1, 0, 1, 0}; int edge = 4*u; for (int i = 0; i < 4; ++i, ++edge) { if(flow[edge] <= 0) continue; int const tmpx = x+vertex_x[i]; int const tmpy = y+vertex_y[i]; if(tmpx < 0 || tmpx >= N || tmpy<0 || tmpy >= N) continue; int it = N*tmpy + tmpx; int try_h = height[it]; if(temp_v_it == -1 || try_h < height[N*temp_v_y+temp_v_x]) { temp_h = try_h; temp_v_x = tmpx; temp_v_y = tmpy; temp_v_it = i; } } if (temp_h < height[u]) { int d = min(temp_e, flow[4*u+temp_v_it]); atomicAdd(&flow[4*u+temp_v_it], -d); atomicAdd(&flow[4*(N*temp_v_y+temp_v_x)+((temp_v_it+2)%4)], d); atomicAdd(&excess[u], -d); atomicAdd(&excess[N*temp_v_y+temp_v_x], d); } else { height[u] = temp_h+1; } } } __global__ void check_excess(int * excess, int * place_info, int const N, int const s_x, int const s_y, int const t_x, int const t_y) { // count coords int const x = (blockIdx.x * 32) + threadIdx.x; int const y = (blockIdx.y * 32) + threadIdx.y; if (x>=N || y>=N) return; if (s_x == x && s_y == y) return; if (t_x == x && t_y == y) { place_info[1] = excess[y*N+x]; } else if (excess[y*N+x] > 0) { atomicAdd(&place_info[0], 1); } } }
bfa4e05d3faa7627425bfa25910925bffbe7c4f6.cu
#include<cstdio> #include<algorithm> #include<climits> extern "C" { __global__ void initialize(int const *capacity, int *flow, int *excess, int *height, int const s_x, int const s_y, int const N) { // count coords int const x = (blockIdx.x * 32) + threadIdx.x; int const y = (blockIdx.y * 32) + threadIdx.y; if (x>=N || y>=N) return; int u = y*N + x; if (x == s_x && y == s_y) { height[u] = N; // h(s) <- |V| excess[u] = INT_MAX/4; //printf("START HEIGHT SET TO %d %d\n", height[y*N+x], excess[y*N+x]); } else { height[u] = 0; // h(u) <- 0 excess[u] = 0; //Dla dowolnego wierzchołka (x,y) przepustowości krawędzi prowadzących do jego sąsiadów wynoszą: capacity[4∗(N∗y+x)] (krawędź w górę), [...+1] (krawędź w prawo), [...+2] (krawędź w dół), [...3] (krawędź w lewo). int edge = 4*u; for (int i = 0; i < 4; ++i) { flow[edge] = capacity[edge]; // Cf(u, v) <- Cuv edge++; } } __syncthreads(); if (x == s_x && y == s_y) { int const vertex_x[] = {0, 1, 0, -1}; int const vertex_y[] = {-1, 0, 1, 0}; int edge = 4*u; for (int i = 0; i < 4; ++i) { flow[edge] = 0; // Cf(s, u) <- 0 int const tmpx = x+vertex_x[i]; int const tmpy = y+vertex_y[i]; if(tmpx >= 0 && tmpx < N && tmpy>=0 && tmpy < N) { excess[N*tmpy+tmpx] = capacity[edge]; // e(u) = C(s, u) flow[4*(N*tmpy+tmpx)+((i+2)%4)] += capacity[edge];// + capacity[4*(N*tmpy+tmpx)+(i+2)%4]; } edge++; /* (x, y-1) (x+1, y) (x, y+1) (x-1, y) */ } } return; } __global__ void push_relabel(int *excess, int *height, int *flow, int const N, int const s_x, int const s_y, int const t_x, int const t_y) { // count coords int const x = (blockIdx.x * 32) + threadIdx.x; int const y = (blockIdx.y * 32) + threadIdx.y; if (x>=N || y>=N) return; if (x == s_x && y == s_y) return; if (x == t_x && y == t_y) return; if(excess[N*y+x]>0){ int u = N*y + x; int temp_e = excess[u]; int temp_v_x = -1, temp_v_y = -1; int temp_h = INT_MAX/2; int temp_v_it = -1; int const vertex_x[] = {0, 1, 0, -1}; int const vertex_y[] = {-1, 0, 1, 0}; int edge = 4*u; for (int i = 0; i < 4; 
++i, ++edge) { if(flow[edge] <= 0) continue; int const tmpx = x+vertex_x[i]; int const tmpy = y+vertex_y[i]; if(tmpx < 0 || tmpx >= N || tmpy<0 || tmpy >= N) continue; int it = N*tmpy + tmpx; int try_h = height[it]; if(temp_v_it == -1 || try_h < height[N*temp_v_y+temp_v_x]) { temp_h = try_h; temp_v_x = tmpx; temp_v_y = tmpy; temp_v_it = i; } } if (temp_h < height[u]) { int d = min(temp_e, flow[4*u+temp_v_it]); atomicAdd(&flow[4*u+temp_v_it], -d); atomicAdd(&flow[4*(N*temp_v_y+temp_v_x)+((temp_v_it+2)%4)], d); atomicAdd(&excess[u], -d); atomicAdd(&excess[N*temp_v_y+temp_v_x], d); } else { height[u] = temp_h+1; } } } __global__ void check_excess(int * excess, int * place_info, int const N, int const s_x, int const s_y, int const t_x, int const t_y) { // count coords int const x = (blockIdx.x * 32) + threadIdx.x; int const y = (blockIdx.y * 32) + threadIdx.y; if (x>=N || y>=N) return; if (s_x == x && s_y == y) return; if (t_x == x && t_y == y) { place_info[1] = excess[y*N+x]; } else if (excess[y*N+x] > 0) { atomicAdd(&place_info[0], 1); } } }
2d96a304b0326a3d56736b393c6badfebc7f1c78.hip
// !!! This is a file automatically generated by hipify!!! /* File: matmult-cuda-double.cu * * Purpose: * * Input: * * Output: * * Compile: nvcc -o matmult-cuda-double.o matmult-cuda-double.cu * * Run: ./matmult-cuda-double.o * * Algorithm: * * Note: * * */ #include <stdio.h> #include <hip/hip_runtime.h> __global__ void VecAdd(double* A, double* B, double* C, int N) { int index = blockIdx.x * blockDim.x + threadIdx.x; //indice del vector int ix; //ix indica el renglon int iy; //iy toma valores solo entre 0 a N-1 double result; //Acumula la suma del renglon por la columna int k; // Iterador if(index < N * N) { ix = index / N; iy = index % N; result = 0.0; for(k = 0; k < N; k++) result += A[k + N * ix] * B[k * N + iy ]; C[iy + N * ix] = result; } } // Host code int main() { //Variables int N; // Tamao de la matriz cuadrada. int i; // Indice del renglon. int j; // Indice de la columna. size_t size; // Tamao total en memoria. double* h_A; // Matriz A en el equipo. double* h_B; // Matriz B en el equipo. double* h_C; // Matriz C (resultado) en el equipo. double* d_A; // Matriz A en la memoria de la GPU. double* d_B; // Matriz B en la memoria de la GPU. double* d_C; // Matriz C (resultado) en la memoria de la GPU. 
int Tam; // Numero de datos que se manejan int NumHilos; // Hilos por bloque int numBlock; // Numero de bloques necesario para procesar los datos //Asignacion de variables N = 2500; size = N * sizeof(double) * N; //En la memoria del equipo h_A = (double*)malloc(size); h_B = (double*)malloc(size); h_C = (double*)malloc(size); //En la memoria de la GPU hipMalloc(&d_A, size); hipMalloc(&d_B, size); hipMalloc(&d_C, size); // Tam = N * N; NumHilos = 1024; numBlock = Tam / NumHilos; if(Tam % NumHilos > 0) //Si sobran datos, aumenta los bloques en 1 numBlock++; // LLena los arreglos A y B for(i = 0;i < N;i++) //Renglon for(j = 0;j < N;j++) // Columna { h_A[j + i * N] = rand() % (11 * (i + 1)) * 1.12; h_B[j + i * N] = rand() % (11 * (i + 1)) * 1.12; } //Copia los arreglos de memoria del CPU a memoria de la GPU hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); // Invoke kernel hipLaunchKernelGGL(( VecAdd), dim3(numBlock), dim3(NumHilos) , 0, 0, d_A, d_B, d_C, N); //Copia el resultado de la multiplicacion de memoria de la GPU a memoria de la CPU hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); /* //Imprime la matriz A printf("Matriz A\n"); for(i = 0;i < N;i++) { for(j = 0;j < N;j++) printf("%.2e ", h_A[j + i * N]); printf("\n"); } //Imprime la matriz B printf("Matriz B\n"); for(i = 0;i < N;i++) { for(j = 0;j < N;j++) printf("%.2e ", h_B[j + i * N]); printf("\n"); } //Imprime la matriz C printf("Matriz C\n"); for(i = 0;i < N;i++) { for(j = 0;j < N;j++) printf("%.2e ", h_C[j + i * N]); printf("\n"); }*/ //Libera la memoria utilizada. // Free device memory hipFree(d_A); hipFree(d_B); hipFree(d_C); // Free host memory free(h_A); free(h_B); free(h_C); }
2d96a304b0326a3d56736b393c6badfebc7f1c78.cu
/* File: matmult-cuda-double.cu * * Purpose: * * Input: * * Output: * * Compile: nvcc -o matmult-cuda-double.o matmult-cuda-double.cu * * Run: ./matmult-cuda-double.o * * Algorithm: * * Note: * * */ #include <stdio.h> #include <cuda_runtime.h> __global__ void VecAdd(double* A, double* B, double* C, int N) { int index = blockIdx.x * blockDim.x + threadIdx.x; //indice del vector int ix; //ix indica el renglon int iy; //iy toma valores solo entre 0 a N-1 double result; //Acumula la suma del renglon por la columna int k; // Iterador if(index < N * N) { ix = index / N; iy = index % N; result = 0.0; for(k = 0; k < N; k++) result += A[k + N * ix] * B[k * N + iy ]; C[iy + N * ix] = result; } } // Host code int main() { //Variables int N; // Tamaño de la matriz cuadrada. int i; // Indice del renglon. int j; // Indice de la columna. size_t size; // Tamaño total en memoria. double* h_A; // Matriz A en el equipo. double* h_B; // Matriz B en el equipo. double* h_C; // Matriz C (resultado) en el equipo. double* d_A; // Matriz A en la memoria de la GPU. double* d_B; // Matriz B en la memoria de la GPU. double* d_C; // Matriz C (resultado) en la memoria de la GPU. 
int Tam; // Numero de datos que se manejan int NumHilos; // Hilos por bloque int numBlock; // Numero de bloques necesario para procesar los datos //Asignacion de variables N = 2500; size = N * sizeof(double) * N; //En la memoria del equipo h_A = (double*)malloc(size); h_B = (double*)malloc(size); h_C = (double*)malloc(size); //En la memoria de la GPU cudaMalloc(&d_A, size); cudaMalloc(&d_B, size); cudaMalloc(&d_C, size); // Tam = N * N; NumHilos = 1024; numBlock = Tam / NumHilos; if(Tam % NumHilos > 0) //Si sobran datos, aumenta los bloques en 1 numBlock++; // LLena los arreglos A y B for(i = 0;i < N;i++) //Renglon for(j = 0;j < N;j++) // Columna { h_A[j + i * N] = rand() % (11 * (i + 1)) * 1.12; h_B[j + i * N] = rand() % (11 * (i + 1)) * 1.12; } //Copia los arreglos de memoria del CPU a memoria de la GPU cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); // Invoke kernel VecAdd<<<numBlock, NumHilos >>>(d_A, d_B, d_C, N); //Copia el resultado de la multiplicacion de memoria de la GPU a memoria de la CPU cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); /* //Imprime la matriz A printf("Matriz A\n"); for(i = 0;i < N;i++) { for(j = 0;j < N;j++) printf("%.2e ", h_A[j + i * N]); printf("\n"); } //Imprime la matriz B printf("Matriz B\n"); for(i = 0;i < N;i++) { for(j = 0;j < N;j++) printf("%.2e ", h_B[j + i * N]); printf("\n"); } //Imprime la matriz C printf("Matriz C\n"); for(i = 0;i < N;i++) { for(j = 0;j < N;j++) printf("%.2e ", h_C[j + i * N]); printf("\n"); }*/ //Libera la memoria utilizada. // Free device memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // Free host memory free(h_A); free(h_B); free(h_C); }
77af23516ed9de04cf415612ffb615d345636cc1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "task1.h" #include "task2.h" __global__ void mul_gpu(int* a, int* b, int* c) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid <= N / 256) c[tid] = abs(a[tid]) * abs(b[tid]) * a[tid] * b[tid] / (abs(a[tid]) * abs(b[tid])); } int task3() { //cuda- : hipEvent_t start, stop; float elepsedTime; hipStream_t stream0, stream1; hipStreamCreate(&stream0); hipStreamCreate(&stream1); printf("\n\tmultiplication of vectors through 2 streams:\n"); for (long var_size = 1024; var_size <= FULL_DATA_SIZE / 2; var_size *= 2) { int* host_a, * host_b, * host_c; int* dev_a0, * dev_b0, * dev_c0;// gpu input buffer for stream0, int* dev_a1, * dev_b1, * dev_c1;// gpu input buffer for stream1, // gpu hipMalloc((void**)&dev_a0, var_size * sizeof(int)); hipMalloc((void**)&dev_b0, var_size * sizeof(int)); hipMalloc((void**)&dev_c0, var_size * sizeof(int)); // gpu hipMalloc((void**)&dev_a1, var_size * sizeof(int)); hipMalloc((void**)&dev_b1, var_size * sizeof(int)); hipMalloc((void**)&dev_c1, var_size * sizeof(int)); // page-locked , hipHostMalloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault); hipHostMalloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault); hipHostMalloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault); for (int i = 0; i < FULL_DATA_SIZE; i++) { host_a[i] = i;//rand(); host_b[i] = i;// rand(); } hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // , : for (int i = 0; i < FULL_DATA_SIZE; i += var_size * 2) { // : hipMemcpyAsync(dev_a0, host_a + i, var_size * sizeof(int), hipMemcpyHostToDevice, stream0); hipMemcpyAsync(dev_b0, host_b + i, var_size * sizeof(int), hipMemcpyHostToDevice, stream0); hipLaunchKernelGGL(( mul_gpu) , dim3(var_size / 256), dim3(256), 0, stream0 , dev_a0, dev_b0, dev_c0); // hipMemcpyAsync(host_c + i, dev_c0, var_size * sizeof(int), hipMemcpyDeviceToHost, stream0); // 
hipMemcpyAsync(dev_a1, host_a + i + var_size, var_size * sizeof(int), hipMemcpyHostToDevice, stream1); hipMemcpyAsync(dev_b1, host_b + i + var_size, var_size * sizeof(int), hipMemcpyHostToDevice, stream1); hipLaunchKernelGGL(( mul_gpu) , dim3(var_size / 256), dim3(256), 0, stream1 , dev_a1, dev_b1, dev_c1); // hipMemcpyAsync(host_c + i + var_size, dev_c1, var_size * sizeof(int), hipMemcpyDeviceToHost, stream1); } // hipStreamSynchronize(stream0); hipStreamSynchronize(stream1); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elepsedTime, start, stop); printf("var_size = FULL_DATA_SIZE / %5ld. Time taken: %f ms\n", FULL_DATA_SIZE / var_size, elepsedTime); //: /* for (int i = 0; i < N / 256; i++) { printf("%d+%d=%d\n", host_a[i], host_b[i], host_c[i]); } */ hipHostFree(host_a); hipHostFree(host_b); hipHostFree(host_c); hipFree(dev_a0); hipFree(dev_b0); hipFree(dev_c0); hipFree(dev_a1); hipFree(dev_b1); hipFree(dev_c1); } hipStreamDestroy(stream0); hipStreamDestroy(stream1); return 0; }
77af23516ed9de04cf415612ffb615d345636cc1.cu
#include "task1.h" #include "task2.h" __global__ void mul_gpu(int* a, int* b, int* c) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid <= N / 256) c[tid] = abs(a[tid]) * abs(b[tid]) * a[tid] * b[tid] / (abs(a[tid]) * abs(b[tid])); } int task3() { //cuda-события для замерения времени выполнения: cudaEvent_t start, stop; float elepsedTime; cudaStream_t stream0, stream1; cudaStreamCreate(&stream0); cudaStreamCreate(&stream1); printf("\n\tmultiplication of vectors through 2 streams:\n"); for (long var_size = 1024; var_size <= FULL_DATA_SIZE / 2; var_size *= 2) { int* host_a, * host_b, * host_c; int* dev_a0, * dev_b0, * dev_c0;// первый gpu input buffer for stream0, который будет заполнен рандомными числами int* dev_a1, * dev_b1, * dev_c1;// второй gpu input buffer for stream1, который будет заполнен рандомными числами //выделение памяти на gpu cudaMalloc((void**)&dev_a0, var_size * sizeof(int)); cudaMalloc((void**)&dev_b0, var_size * sizeof(int)); cudaMalloc((void**)&dev_c0, var_size * sizeof(int)); //выделение памяти на gpu cudaMalloc((void**)&dev_a1, var_size * sizeof(int)); cudaMalloc((void**)&dev_b1, var_size * sizeof(int)); cudaMalloc((void**)&dev_c1, var_size * sizeof(int)); // выделение page-locked памяти, испльзуемой для стримов cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault); cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault); cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault); for (int i = 0; i < FULL_DATA_SIZE; i++) { host_a[i] = i;//rand(); host_b[i] = i;// rand(); } cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // теперь проитерировать всю дату, через байтные куски: for (int i = 0; i < FULL_DATA_SIZE; i += var_size * 2) { // асинхронно копировать закрытую память на устройство: cudaMemcpyAsync(dev_a0, host_a + i, var_size * sizeof(int), cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync(dev_b0, host_b + i, var_size * 
sizeof(int), cudaMemcpyHostToDevice, stream0); mul_gpu <<< var_size / 256, 256, 0, stream0 >>> (dev_a0, dev_b0, dev_c0); // копировать дату с устройства на закрытую памятьЖ cudaMemcpyAsync(host_c + i, dev_c0, var_size * sizeof(int), cudaMemcpyDeviceToHost, stream0); // асинхронно копировать закрытиую память на устройство cudaMemcpyAsync(dev_a1, host_a + i + var_size, var_size * sizeof(int), cudaMemcpyHostToDevice, stream1); cudaMemcpyAsync(dev_b1, host_b + i + var_size, var_size * sizeof(int), cudaMemcpyHostToDevice, stream1); mul_gpu <<< var_size / 256, 256, 0, stream1 >>> (dev_a1, dev_b1, dev_c1); // копировать дату с устройства на закрытую память cudaMemcpyAsync(host_c + i + var_size, dev_c1, var_size * sizeof(int), cudaMemcpyDeviceToHost, stream1); } // синхронизируем оба потока cudaStreamSynchronize(stream0); cudaStreamSynchronize(stream1); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elepsedTime, start, stop); printf("var_size = FULL_DATA_SIZE / %5ld. Time taken: %f ms\n", FULL_DATA_SIZE / var_size, elepsedTime); //проверка: /* for (int i = 0; i < N / 256; i++) { printf("%d+%d=%d\n", host_a[i], host_b[i], host_c[i]); } */ cudaFreeHost(host_a); cudaFreeHost(host_b); cudaFreeHost(host_c); cudaFree(dev_a0); cudaFree(dev_b0); cudaFree(dev_c0); cudaFree(dev_a1); cudaFree(dev_b1); cudaFree(dev_c1); } cudaStreamDestroy(stream0); cudaStreamDestroy(stream1); return 0; }
bf7b154a338dd3bcf91d3c0851a9be84119a2498.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file reader_impl.cu * @brief cuDF-IO ORC reader class implementation */ #include "orc.h" #include "orc_gpu.h" #include "reader_impl.hpp" #include "timezone.cuh" #include <io/comp/gpuinflate.h> #include <io/utilities/config_utils.hpp> #include <io/utilities/time_utils.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <nvcomp/snappy.h> #include <algorithm> #include <iterator> namespace cudf { namespace io { namespace detail { namespace orc { using namespace cudf::io::orc; namespace { /** * @brief Function that translates ORC data kind to cuDF type enum */ constexpr type_id to_type_id(const orc::SchemaType& schema, bool use_np_dtypes, type_id timestamp_type_id, type_id decimal_type_id) { switch (schema.kind) { case orc::BOOLEAN: return type_id::BOOL8; case orc::BYTE: return type_id::INT8; case orc::SHORT: return type_id::INT16; case orc::INT: return type_id::INT32; case orc::LONG: return type_id::INT64; case orc::FLOAT: return type_id::FLOAT32; case orc::DOUBLE: return type_id::FLOAT64; case orc::STRING: case orc::BINARY: case 
orc::VARCHAR: case orc::CHAR: // Variable-length types can all be mapped to STRING return type_id::STRING; case orc::TIMESTAMP: return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_NANOSECONDS; case orc::DATE: // There isn't a (DAYS -> np.dtype) mapping return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS; case orc::DECIMAL: return decimal_type_id; // Need to update once cuDF plans to support map type case orc::MAP: case orc::LIST: return type_id::LIST; case orc::STRUCT: return type_id::STRUCT; default: break; } return type_id::EMPTY; } constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos( const orc::StreamKind kind, uint32_t skip_count, bool non_child) { switch (kind) { case orc::DATA: skip_count += 1; skip_count |= (skip_count & 0xff) << 8; return std::make_pair(gpu::CI_DATA, skip_count); case orc::LENGTH: case orc::SECONDARY: skip_count += 1; skip_count |= (skip_count & 0xff) << 16; return std::make_pair(gpu::CI_DATA2, skip_count); case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count); case orc::PRESENT: skip_count += (non_child ? 1 : 0); return std::make_pair(gpu::CI_PRESENT, skip_count); case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count); default: // Skip this stream as it's not strictly required return std::make_pair(gpu::CI_NUM_STREAMS, 0); } } /** * @brief struct to store buffer data and size of list buffer */ struct list_buffer_data { size_type* data; size_type size; }; // Generates offsets for list buffer from number of elements in a row. 
void generate_offsets_for_list(rmm::device_uvector<list_buffer_data> const& buff_data, rmm::cuda_stream_view stream) { auto transformer = [] __device__(list_buffer_data list_data) { thrust::exclusive_scan( thrust::seq, list_data.data, list_data.data + list_data.size, list_data.data); }; thrust::for_each(rmm::exec_policy(stream), buff_data.begin(), buff_data.end(), transformer); stream.synchronize(); } /** * @brief Struct that maps ORC streams to columns */ struct orc_stream_info { orc_stream_info() = default; explicit orc_stream_info( uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_) : offset(offset_), dst_pos(dst_pos_), length(length_), gdf_idx(gdf_idx_), stripe_idx(stripe_idx_) { } uint64_t offset; // offset in file size_t dst_pos; // offset in memory relative to start of compressed stripe data size_t length; // length in file uint32_t gdf_idx; // column index uint32_t stripe_idx; // stripe index }; /** * @brief Function that populates column descriptors stream/chunk */ size_t gather_stream_info(const size_t stripe_index, const orc::StripeInformation* stripeinfo, const orc::StripeFooter* stripefooter, const std::vector<int>& orc2gdf, const std::vector<orc::SchemaType> types, bool use_index, size_t* num_dictionary_entries, cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, std::vector<orc_stream_info>& stream_info, bool apply_struct_map) { uint64_t src_offset = 0; uint64_t dst_offset = 0; for (const auto& stream : stripefooter->streams) { if (!stream.column_id || *stream.column_id >= orc2gdf.size()) { dst_offset += stream.length; continue; } auto const column_id = *stream.column_id; auto col = orc2gdf[column_id]; if (col == -1 and apply_struct_map) { // A struct-type column has no data itself, but rather child columns // for each of its fields. There is only a PRESENT stream, which // needs to be included for the reader. 
const auto schema_type = types[column_id]; if (schema_type.subtypes.size() != 0) { if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) { for (const auto& idx : schema_type.subtypes) { auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1; if (child_idx >= 0) { col = child_idx; auto& chunk = chunks[stripe_index][col]; chunk.strm_id[gpu::CI_PRESENT] = stream_info.size(); chunk.strm_len[gpu::CI_PRESENT] = stream.length; } } } } } if (col != -1) { if (src_offset >= stripeinfo->indexLength || use_index) { // NOTE: skip_count field is temporarily used to track index ordering auto& chunk = chunks[stripe_index][col]; const auto idx = get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[column_id]); if (idx.first < gpu::CI_NUM_STREAMS) { chunk.strm_id[idx.first] = stream_info.size(); chunk.strm_len[idx.first] = stream.length; chunk.skip_count = idx.second; if (idx.first == gpu::CI_DICTIONARY) { chunk.dictionary_start = *num_dictionary_entries; chunk.dict_len = stripefooter->columns[column_id].dictionarySize; *num_dictionary_entries += stripefooter->columns[column_id].dictionarySize; } } } stream_info.emplace_back( stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index); dst_offset += stream.length; } src_offset += stream.length; } return dst_offset; } /** * @brief Determines cuDF type of an ORC Decimal column. 
*/ auto decimal_column_type(const std::vector<std::string>& float64_columns, const std::vector<std::string>& decimal128_columns, cudf::io::orc::metadata& metadata, int column_index) { auto const& column_path = metadata.column_path(column_index); auto is_column_in = [&](const std::vector<std::string>& cols) { return std::find(cols.cbegin(), cols.cend(), column_path) != cols.end(); }; auto const user_selected_float64 = is_column_in(float64_columns); auto const user_selected_decimal128 = is_column_in(decimal128_columns); CUDF_EXPECTS(not user_selected_float64 or not user_selected_decimal128, "Both decimal128 and float64 types selected for column " + column_path); if (user_selected_float64) return type_id::FLOAT64; if (user_selected_decimal128) return type_id::DECIMAL128; return type_id::DECIMAL64; } } // namespace void snappy_decompress(device_span<gpu_inflate_input_s> comp_in, device_span<gpu_inflate_status_s> comp_stat, size_t max_uncomp_page_size, rmm::cuda_stream_view stream) { size_t num_blocks = comp_in.size(); size_t temp_size; auto status = nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_page_size, &temp_size); CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status, "Unable to get scratch size for snappy decompression"); rmm::device_buffer scratch(temp_size, stream); rmm::device_uvector<void const*> compressed_data_ptrs(num_blocks, stream); rmm::device_uvector<size_t> compressed_data_sizes(num_blocks, stream); rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream); rmm::device_uvector<size_t> uncompressed_data_sizes(num_blocks, stream); rmm::device_uvector<size_t> actual_uncompressed_data_sizes(num_blocks, stream); rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream); // Prepare the vectors auto comp_it = thrust::make_zip_iterator(compressed_data_ptrs.begin(), compressed_data_sizes.begin(), uncompressed_data_ptrs.begin(), uncompressed_data_sizes.data()); thrust::transform(rmm::exec_policy(stream), comp_in.begin(), 
comp_in.end(), comp_it, [] __device__(gpu_inflate_input_s in) { return thrust::make_tuple(in.srcDevice, in.srcSize, in.dstDevice, in.dstSize); }); status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.data(), compressed_data_sizes.data(), uncompressed_data_sizes.data(), actual_uncompressed_data_sizes.data(), num_blocks, scratch.data(), scratch.size(), uncompressed_data_ptrs.data(), statuses.data(), stream.value()); CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status, "unable to perform snappy decompression"); CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream), statuses.begin(), statuses.end(), thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)), "Error during snappy decompression"); thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator(0), num_blocks, [=, actual_uncomp_sizes = actual_uncompressed_data_sizes.data()] __device__(auto i) { comp_stat[i].bytes_written = actual_uncomp_sizes[i]; comp_stat[i].status = 0; }); } rmm::device_buffer reader::impl::decompress_stripe_data( cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, const std::vector<rmm::device_buffer>& stripe_data, const OrcDecompressor* decompressor, std::vector<orc_stream_info>& stream_info, size_t num_stripes, cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups, size_t row_index_stride, bool use_base_stride, rmm::cuda_stream_view stream) { // Parse the columns' compressed info hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream); for (const auto& info : stream_info) { compinfo.insert(gpu::CompressedStreamInfo( static_cast<const uint8_t*>(stripe_data[info.stripe_idx].data()) + info.dst_pos, info.length)); } compinfo.host_to_device(stream); gpu::ParseCompressedStripeData(compinfo.device_ptr(), compinfo.size(), decompressor->GetBlockSize(), decompressor->GetLog2MaxCompressionRatio(), stream); compinfo.device_to_host(stream, true); // Count the exact number of compressed blocks size_t num_compressed_blocks = 0; 
size_t num_uncompressed_blocks = 0; size_t total_decomp_size = 0; for (size_t i = 0; i < compinfo.size(); ++i) { num_compressed_blocks += compinfo[i].num_compressed_blocks; num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks; total_decomp_size += compinfo[i].max_uncompressed_size; } CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found"); rmm::device_buffer decomp_data(total_decomp_size, stream); rmm::device_uvector<gpu_inflate_input_s> inflate_in( num_compressed_blocks + num_uncompressed_blocks, stream); rmm::device_uvector<gpu_inflate_status_s> inflate_out(num_compressed_blocks, stream); // Parse again to populate the decompression input/output buffers size_t decomp_offset = 0; uint32_t max_uncomp_block_size = 0; uint32_t start_pos = 0; uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks; for (size_t i = 0; i < compinfo.size(); ++i) { auto dst_base = static_cast<uint8_t*>(decomp_data.data()); compinfo[i].uncompressed_data = dst_base + decomp_offset; compinfo[i].decctl = inflate_in.data() + start_pos; compinfo[i].decstatus = inflate_out.data() + start_pos; compinfo[i].copyctl = inflate_in.data() + start_pos_uncomp; stream_info[i].dst_pos = decomp_offset; decomp_offset += compinfo[i].max_uncompressed_size; start_pos += compinfo[i].num_compressed_blocks; start_pos_uncomp += compinfo[i].num_uncompressed_blocks; max_uncomp_block_size = ::max(max_uncomp_block_size, compinfo[i].max_uncompressed_block_size); } compinfo.host_to_device(stream); gpu::ParseCompressedStripeData(compinfo.device_ptr(), compinfo.size(), decompressor->GetBlockSize(), decompressor->GetLog2MaxCompressionRatio(), stream); // Dispatch batches of blocks to decompress if (num_compressed_blocks > 0) { switch (decompressor->GetKind()) { case orc::ZLIB: CUDA_TRY( gpuinflate(inflate_in.data(), inflate_out.data(), num_compressed_blocks, 0, stream)); break; case orc::SNAPPY: if (nvcomp_integration::is_stable_enabled()) { device_span<gpu_inflate_input_s> 
inflate_in_view{inflate_in.data(), num_compressed_blocks}; device_span<gpu_inflate_status_s> inflate_out_view{inflate_out.data(), num_compressed_blocks}; snappy_decompress(inflate_in_view, inflate_out_view, max_uncomp_block_size, stream); } else { CUDA_TRY( gpu_unsnap(inflate_in.data(), inflate_out.data(), num_compressed_blocks, stream)); } break; default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break; } } if (num_uncompressed_blocks > 0) { CUDA_TRY(gpu_copy_uncompressed_blocks( inflate_in.data() + num_compressed_blocks, num_uncompressed_blocks, stream)); } gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream); // Update the stream information with the updated uncompressed info // TBD: We could update the value from the information we already // have in stream_info[], but using the gpu results also updates // max_uncompressed_size to the actual uncompressed size, or zero if // decompression failed. compinfo.device_to_host(stream, true); const size_t num_columns = chunks.size().second; for (size_t i = 0; i < num_stripes; ++i) { for (size_t j = 0; j < num_columns; ++j) { auto& chunk = chunks[i][j]; for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) { if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) { chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data; chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size; } } } } if (row_groups.size().first) { chunks.host_to_device(stream); row_groups.host_to_device(stream); gpu::ParseRowGroupIndex(row_groups.base_device_ptr(), compinfo.device_ptr(), chunks.base_device_ptr(), num_columns, num_stripes, row_groups.size().first, row_index_stride, use_base_stride, stream); } return decomp_data; } /** * @brief Updates null mask of columns whose parent is a struct column. * If struct column has null element, that row would be * skipped while writing child column in ORC, so we need to insert the missing null * elements in child column. 
* There is another behavior from pyspark, where if the child column doesn't have any null * elements, it will not have present stream, so in that case parent null mask need to be * copied to child column. * * @param chunks Vector of list of column chunk descriptors * @param out_buffers Output columns' device buffers * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource to use for device memory allocation */ void update_null_mask(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, std::vector<column_buffer>& out_buffers, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { const auto num_stripes = chunks.size().first; const auto num_columns = chunks.size().second; bool is_mask_updated = false; for (size_t col_idx = 0; col_idx < num_columns; ++col_idx) { if (chunks[0][col_idx].parent_validity_info.valid_map_base != nullptr) { if (not is_mask_updated) { chunks.device_to_host(stream, true); is_mask_updated = true; } auto parent_valid_map_base = chunks[0][col_idx].parent_validity_info.valid_map_base; auto child_valid_map_base = out_buffers[col_idx].null_mask(); auto child_mask_len = chunks[0][col_idx].column_num_rows - chunks[0][col_idx].parent_validity_info.null_count; auto parent_mask_len = chunks[0][col_idx].column_num_rows; if (child_valid_map_base != nullptr) { rmm::device_uvector<uint32_t> dst_idx(child_mask_len, stream); // Copy indexes at which the parent has valid value. 
thrust::copy_if(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + parent_mask_len, dst_idx.begin(), [parent_valid_map_base] __device__(auto idx) { return bit_is_set(parent_valid_map_base, idx); }); auto merged_null_mask = cudf::detail::create_null_mask( parent_mask_len, mask_state::ALL_NULL, rmm::cuda_stream_view(stream), mr); auto merged_mask = static_cast<bitmask_type*>(merged_null_mask.data()); uint32_t* dst_idx_ptr = dst_idx.data(); // Copy child valid bits from child column to valid indexes, this will merge both child and // parent null masks thrust::for_each(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + dst_idx.size(), [child_valid_map_base, dst_idx_ptr, merged_mask] __device__(auto idx) { if (bit_is_set(child_valid_map_base, idx)) { cudf::set_bit(merged_mask, dst_idx_ptr[idx]); }; }); out_buffers[col_idx]._null_mask = std::move(merged_null_mask); } else { // Since child column doesn't have a mask, copy parent null mask auto mask_size = bitmask_allocation_size_bytes(parent_mask_len); out_buffers[col_idx]._null_mask = rmm::device_buffer(static_cast<void*>(parent_valid_map_base), mask_size, stream, mr); } } } thrust::counting_iterator<int> col_idx_it(0); thrust::counting_iterator<int> stripe_idx_it(0); if (is_mask_updated) { // Update chunks with pointers to column data which might have been changed. std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) { std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) { auto& chunk = chunks[stripe_idx][col_idx]; chunk.valid_map_base = out_buffers[col_idx].null_mask(); }); }); chunks.host_to_device(stream, true); } } /** * @brief Compute the per-stripe prefix sum of null count, for each struct column in the current * layer. 
*/ void scan_null_counts(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> const& chunks, cudf::host_span<rmm::device_uvector<uint32_t>> prefix_sums, rmm::cuda_stream_view stream) { auto const num_stripes = chunks.size().first; if (num_stripes == 0) return; auto const num_columns = chunks.size().second; std::vector<thrust::pair<size_type, cudf::device_span<uint32_t>>> prefix_sums_to_update; for (auto col_idx = 0ul; col_idx < num_columns; ++col_idx) { // Null counts sums are only needed for children of struct columns if (chunks[0][col_idx].type_kind == STRUCT) { prefix_sums_to_update.emplace_back(col_idx, prefix_sums[col_idx]); } } auto const d_prefix_sums_to_update = cudf::detail::make_device_uvector_async(prefix_sums_to_update, stream); thrust::for_each(rmm::exec_policy(stream), d_prefix_sums_to_update.begin(), d_prefix_sums_to_update.end(), [chunks = cudf::detail::device_2dspan<gpu::ColumnDesc const>{chunks}] __device__( auto const& idx_psums) { auto const col_idx = idx_psums.first; auto const psums = idx_psums.second; thrust::transform( thrust::seq, thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + psums.size(), psums.begin(), [&](auto stripe_idx) { return chunks[stripe_idx][col_idx].null_count; }); thrust::inclusive_scan(thrust::seq, psums.begin(), psums.end(), psums.begin()); }); // `prefix_sums_to_update` goes out of scope, copy has to be done before we return stream.synchronize(); } void reader::impl::decode_stream_data(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, size_t num_dicts, size_t skip_rows, timezone_table_view tz_table, cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups, size_t row_index_stride, std::vector<column_buffer>& out_buffers, size_t level, rmm::cuda_stream_view stream) { const auto num_stripes = chunks.size().first; const auto num_columns = chunks.size().second; thrust::counting_iterator<int> col_idx_it(0); thrust::counting_iterator<int> stripe_idx_it(0); // Update chunks with pointers to 
column data std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) { std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) { auto& chunk = chunks[stripe_idx][col_idx]; chunk.column_data_base = out_buffers[col_idx].data(); chunk.valid_map_base = out_buffers[col_idx].null_mask(); }); }); // Allocate global dictionary for deserializing rmm::device_uvector<gpu::DictionaryEntry> global_dict(num_dicts, stream); chunks.host_to_device(stream, true); gpu::DecodeNullsAndStringDictionaries( chunks.base_device_ptr(), global_dict.data(), num_columns, num_stripes, skip_rows, stream); if (level > 0) { // Update nullmasks for children if parent was a struct and had null mask update_null_mask(chunks, out_buffers, stream, _mr); } // Update the null map for child columns gpu::DecodeOrcColumnData(chunks.base_device_ptr(), global_dict.data(), row_groups, num_columns, num_stripes, skip_rows, tz_table, row_groups.size().first, row_index_stride, level, stream); chunks.device_to_host(stream, true); std::for_each(col_idx_it + 0, col_idx_it + num_columns, [&](auto col_idx) { out_buffers[col_idx].null_count() = std::accumulate(stripe_idx_it + 0, stripe_idx_it + num_stripes, 0, [&](auto null_count, auto const stripe_idx) { return null_count + chunks[stripe_idx][col_idx].null_count; }); }); } // Aggregate child column metadata per stripe and per column void reader::impl::aggregate_child_meta(cudf::detail::host_2dspan<gpu::ColumnDesc> chunks, cudf::detail::host_2dspan<gpu::RowGroup> row_groups, std::vector<column_buffer>& out_buffers, std::vector<orc_column_meta> const& list_col, const size_type level) { const auto num_of_stripes = chunks.size().first; const auto num_of_rowgroups = row_groups.size().first; const auto num_parent_cols = selected_columns.levels[level].size(); const auto num_child_cols = selected_columns.levels[level + 1].size(); const auto number_of_child_chunks = num_child_cols * num_of_stripes; auto& num_child_rows = 
_col_meta.num_child_rows; auto& parent_column_data = _col_meta.parent_column_data; // Reset the meta to store child column details. num_child_rows.resize(selected_columns.levels[level + 1].size()); std::fill(num_child_rows.begin(), num_child_rows.end(), 0); parent_column_data.resize(number_of_child_chunks); _col_meta.parent_column_index.resize(number_of_child_chunks); _col_meta.child_start_row.resize(number_of_child_chunks); _col_meta.num_child_rows_per_stripe.resize(number_of_child_chunks); _col_meta.rwgrp_meta.resize(num_of_rowgroups * num_child_cols); auto child_start_row = cudf::detail::host_2dspan<uint32_t>( _col_meta.child_start_row.data(), num_of_stripes, num_child_cols); auto num_child_rows_per_stripe = cudf::detail::host_2dspan<uint32_t>( _col_meta.num_child_rows_per_stripe.data(), num_of_stripes, num_child_cols); auto rwgrp_meta = cudf::detail::host_2dspan<reader_column_meta::row_group_meta>( _col_meta.rwgrp_meta.data(), num_of_rowgroups, num_child_cols); int index = 0; // number of child column processed // For each parent column, update its child column meta for each stripe. 
std::for_each(list_col.cbegin(), list_col.cend(), [&](const auto p_col) { const auto parent_col_idx = _col_meta.orc_col_map[level][p_col.id]; auto start_row = 0; auto processed_row_groups = 0; for (size_t stripe_id = 0; stripe_id < num_of_stripes; stripe_id++) { // Aggregate num_rows and start_row from processed parent columns per row groups if (num_of_rowgroups) { auto stripe_num_row_groups = chunks[stripe_id][parent_col_idx].num_rowgroups; auto processed_child_rows = 0; for (size_t rowgroup_id = 0; rowgroup_id < stripe_num_row_groups; rowgroup_id++, processed_row_groups++) { const auto child_rows = row_groups[processed_row_groups][parent_col_idx].num_child_rows; for (size_type id = 0; id < p_col.num_children; id++) { const auto child_col_idx = index + id; rwgrp_meta[processed_row_groups][child_col_idx].start_row = processed_child_rows; rwgrp_meta[processed_row_groups][child_col_idx].num_rows = child_rows; } processed_child_rows += child_rows; } } // Aggregate start row, number of rows per chunk and total number of rows in a column const auto child_rows = chunks[stripe_id][parent_col_idx].num_child_rows; for (size_type id = 0; id < p_col.num_children; id++) { const auto child_col_idx = index + id; num_child_rows[child_col_idx] += child_rows; num_child_rows_per_stripe[stripe_id][child_col_idx] = child_rows; // start row could be different for each column when there is nesting at each stripe level child_start_row[stripe_id][child_col_idx] = (stripe_id == 0) ? 0 : start_row; } start_row += child_rows; } // Parent column null mask and null count would be required for child column // to adjust its nullmask. 
auto type              = out_buffers[parent_col_idx].type.id();
    auto parent_null_count = static_cast<uint32_t>(out_buffers[parent_col_idx].null_count());
    auto parent_valid_map  = out_buffers[parent_col_idx].null_mask();
    auto num_rows          = out_buffers[parent_col_idx].size;

    for (size_type id = 0; id < p_col.num_children; id++) {
      const auto child_col_idx                     = index + id;
      _col_meta.parent_column_index[child_col_idx] = parent_col_idx;
      if (type == type_id::STRUCT) {
        parent_column_data[child_col_idx] = {parent_valid_map, parent_null_count};
        // Number of rows in child will remain same as parent in case of struct column
        num_child_rows[child_col_idx] = num_rows;
      } else {
        // Non-struct parents (e.g. LIST) do not propagate their null mask directly.
        parent_column_data[child_col_idx] = {nullptr, 0};
      }
    }
    index += p_col.num_children;
  });
}

// Returns the conventional name of a MAP column's child: child 0 is "key", child 1 is "value".
std::string get_map_child_col_name(size_t const idx) { return (idx == 0) ? "key" : "value"; }

/**
 * @brief Recursively creates an empty cudf column (and its schema info) for the given
 * ORC column id, preserving the nested LIST/MAP/STRUCT shape.
 */
std::unique_ptr<column> reader::impl::create_empty_column(const size_type orc_col_id,
                                                          column_name_info& schema_info,
                                                          rmm::cuda_stream_view stream)
{
  schema_info.name = _metadata.column_name(0, orc_col_id);
  // Resolve the cudf type id, honoring user decimal overrides.
  auto const type  = to_type_id(
    _metadata.get_schema(orc_col_id),
    _use_np_dtypes,
    _timestamp_type.id(),
    decimal_column_type(
      _decimal_cols_as_float, decimal128_columns, _metadata.per_file_metadata[0], orc_col_id));
  int32_t scale = 0;
  std::vector<std::unique_ptr<column>> child_columns;
  std::unique_ptr<column> out_col = nullptr;
  auto kind                       = _metadata.get_col_type(orc_col_id).kind;

  switch (kind) {
    case orc::LIST:
      // LIST maps to offsets (INT32) + one child column.
      schema_info.children.emplace_back("offsets");
      schema_info.children.emplace_back("");
      out_col = make_lists_column(
        0,
        make_empty_column(type_id::INT32),
        create_empty_column(
          _metadata.get_col_type(orc_col_id).subtypes[0], schema_info.children.back(), stream),
        0,
        rmm::device_buffer{0, stream},
        stream);
      break;
    case orc::MAP: {
      // MAP is represented as LIST<STRUCT<key, value>>.
      schema_info.children.emplace_back("offsets");
      schema_info.children.emplace_back("struct");
      const auto child_column_ids = _metadata.get_col_type(orc_col_id).subtypes;
      for (size_t idx = 0; idx < _metadata.get_col_type(orc_col_id).subtypes.size(); idx++) {
        auto& children_schema = schema_info.children.back().children;
        children_schema.emplace_back("");
        child_columns.push_back(create_empty_column(
          child_column_ids[idx], schema_info.children.back().children.back(), stream));
        auto name                 = get_map_child_col_name(idx);
        children_schema[idx].name = name;
      }
      auto struct_col =
        make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream);
      out_col = make_lists_column(0,
                                  make_empty_column(type_id::INT32),
                                  std::move(struct_col),
                                  0,
                                  rmm::device_buffer{0, stream},
                                  stream);
    } break;
    case orc::STRUCT:
      // One empty child column per ORC subtype.
      for (const auto col : _metadata.get_col_type(orc_col_id).subtypes) {
        schema_info.children.emplace_back("");
        child_columns.push_back(create_empty_column(col, schema_info.children.back(), stream));
      }
      out_col =
        make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream);
      break;
    case orc::DECIMAL:
      // cuDF uses negative scale; ORC metadata stores positive scale.
      if (type == type_id::DECIMAL64 or type == type_id::DECIMAL128) {
        scale = -static_cast<int32_t>(_metadata.get_types()[orc_col_id].scale.value_or(0));
      }
      out_col = make_empty_column(data_type(type, scale));
      break;
    default: out_col = make_empty_column(type);
  }

  return out_col;
}

// Adds child column buffers to parent column
// Recursively stitches per-level buffers into a single nested column_buffer tree.
column_buffer&& reader::impl::assemble_buffer(const size_type orc_col_id,
                                              std::vector<std::vector<column_buffer>>& col_buffers,
                                              const size_t level,
                                              rmm::cuda_stream_view stream)
{
  auto const col_id = _col_meta.orc_col_map[level][orc_col_id];
  auto& col_buffer  = col_buffers[level][col_id];

  col_buffer.name = _metadata.column_name(0, orc_col_id);
  auto kind       = _metadata.get_col_type(orc_col_id).kind;
  switch (kind) {
    case orc::LIST:
    case orc::STRUCT:
      // Children live one nesting level deeper.
      for (auto const& col : selected_columns.children[orc_col_id]) {
        col_buffer.children.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream));
      }
      break;
    case orc::MAP: {
      std::vector<column_buffer> child_col_buffers;
      // Get child buffers
      for (size_t idx = 0; idx < selected_columns.children[orc_col_id].size(); idx++) {
        auto name = get_map_child_col_name(idx);
        auto col  = selected_columns.children[orc_col_id][idx];
        child_col_buffers.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream));
        child_col_buffers.back().name = name;
      }
      // Create a struct buffer wrapping key/value, matching the LIST<STRUCT> MAP layout.
      auto num_rows = child_col_buffers[0].size;
      auto struct_buffer =
        column_buffer(cudf::data_type(type_id::STRUCT), num_rows, false, stream, _mr);
      struct_buffer.children = std::move(child_col_buffers);
      struct_buffer.name     = "struct";

      col_buffer.children.emplace_back(std::move(struct_buffer));
    } break;

    default: break;
  }

  return std::move(col_buffer);
}

// creates columns along with schema information for each column
void reader::impl::create_columns(std::vector<std::vector<column_buffer>>&& col_buffers,
                                  std::vector<std::unique_ptr<column>>& out_columns,
                                  std::vector<column_name_info>& schema_info,
                                  rmm::cuda_stream_view stream)
{
  std::transform(selected_columns.levels[0].begin(),
                 selected_columns.levels[0].end(),
                 std::back_inserter(out_columns),
                 [&](auto const col_meta) {
                   schema_info.emplace_back("");
                   auto col_buffer = assemble_buffer(col_meta.id, col_buffers, 0, stream);
                   return make_column(col_buffer, &schema_info.back(), stream, _mr);
                 });
}

reader::impl::impl(std::vector<std::unique_ptr<datasource>>&& sources,
                   orc_reader_options const& options,
                   rmm::mr::device_memory_resource* mr)
  : _mr(mr),
    _sources(std::move(sources)),
    _metadata{_sources},
    selected_columns{_metadata.select_columns(options.get_columns())}
{
  // Override output timestamp resolution if requested
  if (options.get_timestamp_type().id() != type_id::EMPTY) {
    _timestamp_type = options.get_timestamp_type();
  }

  // Enable or disable attempt to use row index for parsing
  _use_index = options.is_enabled_use_index();

  // Enable or disable the conversion to numpy-compatible dtypes
  _use_np_dtypes = options.is_enabled_use_np_dtypes();

  // Control decimals conversion
  _decimal_cols_as_float = options.get_decimal_cols_as_float();
decimal128_columns = options.get_decimal128_columns();
}

/**
 * @brief Builds the timezone transition table needed for TIMESTAMP columns.
 *
 * Returns an empty table when no stripes are selected or no selected column is a TIMESTAMP.
 * NOTE(review): uses the writer timezone of the first stripe only — presumably all stripes
 * share one writer timezone; confirm against the ORC writer contract.
 */
timezone_table reader::impl::compute_timezone_table(
  const std::vector<cudf::io::orc::metadata::stripe_source_mapping>& selected_stripes,
  rmm::cuda_stream_view stream)
{
  if (selected_stripes.empty()) return {};

  auto const has_timestamp_column = std::any_of(
    selected_columns.levels.cbegin(), selected_columns.levels.cend(), [&](auto& col_lvl) {
      return std::any_of(col_lvl.cbegin(), col_lvl.cend(), [&](auto& col_meta) {
        return _metadata.get_col_type(col_meta.id).kind == TypeKind::TIMESTAMP;
      });
    });
  if (not has_timestamp_column) return {};

  return build_timezone_transition_table(selected_stripes[0].stripe_info[0].second->writerTimezone,
                                         stream);
}

table_with_metadata reader::impl::read(size_type skip_rows,
                                       size_type num_rows,
                                       const std::vector<std::vector<size_type>>& stripes,
                                       rmm::cuda_stream_view stream)
{
  // Selected columns at different levels of nesting are stored in different elements
  // of `selected_columns`; thus, size == 1 means no nested columns
  CUDF_EXPECTS(skip_rows == 0 or selected_columns.num_levels() == 1,
               "skip_rows is not supported by nested columns");

  std::vector<std::unique_ptr<column>> out_columns;
  // buffer and stripe data are stored as per nesting level
  std::vector<std::vector<column_buffer>> out_buffers(selected_columns.num_levels());
  std::vector<column_name_info> schema_info;
  std::vector<std::vector<rmm::device_buffer>> lvl_stripe_data(selected_columns.num_levels());
  std::vector<std::vector<rmm::device_uvector<uint32_t>>> null_count_prefix_sums;
  table_metadata out_metadata;

  // There are no columns in the table
  if (selected_columns.num_levels() == 0)
    return {std::make_unique<table>(), std::move(out_metadata)};

  // Select only stripes required (aka row groups)
  const auto selected_stripes = _metadata.select_stripes(stripes, skip_rows, num_rows);

  auto const tz_table = compute_timezone_table(selected_stripes, stream);

  // Iterates through levels of nested columns, child column will be one level down
  // compared to parent column.
  for (size_t level = 0; level < selected_columns.num_levels(); level++) {
    auto& columns_level = selected_columns.levels[level];
    // Association between each ORC column and its cudf::column
    _col_meta.orc_col_map.emplace_back(_metadata.get_num_cols(), -1);
    std::vector<orc_column_meta> nested_col;
    bool is_data_empty = false;

    // Get a list of column data types
    std::vector<data_type> column_types;
    for (auto& col : columns_level) {
      auto col_type = to_type_id(
        _metadata.get_col_type(col.id),
        _use_np_dtypes,
        _timestamp_type.id(),
        decimal_column_type(
          _decimal_cols_as_float, decimal128_columns, _metadata.per_file_metadata[0], col.id));
      CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
      CUDF_EXPECTS(
        (col_type != type_id::DECIMAL64) or (_metadata.get_col_type(col.id).precision <= 18),
        "Precision of column " + std::string{_metadata.column_name(0, col.id)} +
          " is over 18, use 128-bit Decimal.");
      if (col_type == type_id::DECIMAL64 or col_type == type_id::DECIMAL128) {
        // sign of the scale is changed since cuDF follows c++ libraries like CNL
        // which uses negative scaling, but liborc and other libraries
        // follow positive scaling.
auto const scale =
          -static_cast<size_type>(_metadata.get_col_type(col.id).scale.value_or(0));
        column_types.emplace_back(col_type, scale);
      } else {
        column_types.emplace_back(col_type);
      }

      // Map each ORC column to its column
      _col_meta.orc_col_map[level][col.id] = column_types.size() - 1;
      // TODO: Once MAP type is supported in cuDF, update this for MAP as well
      if (col_type == type_id::LIST or col_type == type_id::STRUCT) nested_col.emplace_back(col);
    }

    // If no rows or stripes to read, return empty columns
    if (num_rows <= 0 || selected_stripes.empty()) {
      std::transform(selected_columns.levels[0].begin(),
                     selected_columns.levels[0].end(),
                     std::back_inserter(out_columns),
                     [&](auto const col_meta) {
                       schema_info.emplace_back("");
                       return create_empty_column(col_meta.id, schema_info.back(), stream);
                     });
      break;
    } else {
      // Get the total number of stripes across all input files.
      size_t total_num_stripes =
        std::accumulate(selected_stripes.begin(),
                        selected_stripes.end(),
                        0,
                        [](size_t sum, auto& stripe_source_mapping) {
                          return sum + stripe_source_mapping.stripe_info.size();
                        });
      const auto num_columns = columns_level.size();
      cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> chunks(
        total_num_stripes, num_columns, stream);
      // Zero-init the host side of the chunk descriptors before filling them in.
      memset(chunks.base_host_ptr(), 0, chunks.memory_size());

      const bool use_index =
        (_use_index == true) &&
        // Do stripes have row group index
        _metadata.is_row_grp_idx_present() &&
        // Only use if we don't have much work with complete columns & stripes
        // TODO: Consider nrows, gpu, and tune the threshold
        (num_rows > _metadata.get_row_index_stride() && !(_metadata.get_row_index_stride() & 7) &&
         _metadata.get_row_index_stride() > 0 && num_columns * total_num_stripes < 8 * 128) &&
        // Only use if first row is aligned to a stripe boundary
        // TODO: Fix logic to handle unaligned rows
        (skip_rows == 0);

      // Logically view streams as columns
      std::vector<orc_stream_info> stream_info;

      null_count_prefix_sums.emplace_back();
      // One zeroed per-stripe prefix-sum buffer per column at this level; children at the
      // next level read these to adjust their null masks.
      null_count_prefix_sums.back().reserve(selected_columns.levels[level].size());
      std::generate_n(std::back_inserter(null_count_prefix_sums.back()),
                      selected_columns.levels[level].size(),
                      [&]() {
                        return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
                          total_num_stripes, stream);
                      });

      // Tracker for eventually deallocating compressed and uncompressed data
      auto& stripe_data = lvl_stripe_data[level];

      size_t stripe_start_row = 0;
      size_t num_dict_entries = 0;
      size_t num_rowgroups    = 0;
      int stripe_idx          = 0;

      std::vector<std::pair<std::future<size_t>, size_t>> read_tasks;
      for (auto const& stripe_source_mapping : selected_stripes) {
        // Iterate through the source files selected stripes
        for (auto const& stripe : stripe_source_mapping.stripe_info) {
          const auto stripe_info   = stripe.first;
          const auto stripe_footer = stripe.second;

          auto stream_count          = stream_info.size();
          const auto total_data_size = gather_stream_info(stripe_idx,
                                                          stripe_info,
                                                          stripe_footer,
                                                          _col_meta.orc_col_map[level],
                                                          _metadata.get_types(),
                                                          use_index,
                                                          &num_dict_entries,
                                                          chunks,
                                                          stream_info,
                                                          level == 0);

          if (total_data_size == 0) {
            CUDF_EXPECTS(stripe_info->indexLength == 0, "Invalid index rowgroup stream data");
            // In case ROW GROUP INDEX is not present and all columns are structs with no null
            // stream, there is nothing to read at this level.
auto fn_check_dtype = [](auto dtype) { return dtype.id() == type_id::STRUCT; };
            CUDF_EXPECTS(std::all_of(column_types.begin(), column_types.end(), fn_check_dtype),
                         "Expected streams data within stripe");
            is_data_empty = true;
          }

          stripe_data.emplace_back(total_data_size, stream);
          auto dst_base = static_cast<uint8_t*>(stripe_data.back().data());

          // Coalesce consecutive streams into one read
          while (not is_data_empty and stream_count < stream_info.size()) {
            const auto d_dst  = dst_base + stream_info[stream_count].dst_pos;
            const auto offset = stream_info[stream_count].offset;
            auto len          = stream_info[stream_count].length;
            stream_count++;

            // Extend the read while the next stream is byte-contiguous in the file.
            while (stream_count < stream_info.size() &&
                   stream_info[stream_count].offset == offset + len) {
              len += stream_info[stream_count].length;
              stream_count++;
            }
            if (_metadata.per_file_metadata[stripe_source_mapping.source_idx]
                  .source->is_device_read_preferred(len)) {
              // Async device read; completion checked later via read_tasks.
              read_tasks.push_back(
                std::make_pair(_metadata.per_file_metadata[stripe_source_mapping.source_idx]
                                 .source->device_read_async(offset, len, d_dst, stream),
                               len));
            } else {
              // Host read followed by H2D copy; synchronize before the host buffer goes away.
              const auto buffer =
                _metadata.per_file_metadata[stripe_source_mapping.source_idx].source->host_read(
                  offset, len);
              CUDF_EXPECTS(buffer->size() == len, "Unexpected discrepancy in bytes read.");
              CUDA_TRY(hipMemcpyAsync(
                d_dst, buffer->data(), len, hipMemcpyHostToDevice, stream.value()));
              stream.synchronize();
            }
          }

          const auto num_rows_per_stripe = stripe_info->numberOfRows;
          const auto rowgroup_id         = num_rowgroups;
          auto stripe_num_rowgroups      = 0;
          if (use_index) {
            // Ceil-divide rows by the row-index stride to get row groups in this stripe.
            stripe_num_rowgroups = (num_rows_per_stripe + _metadata.get_row_index_stride() - 1) /
                                   _metadata.get_row_index_stride();
          }
          // Update chunks to reference streams pointers
          for (size_t col_idx = 0; col_idx < num_columns; col_idx++) {
            auto& chunk = chunks[stripe_idx][col_idx];
            // start row, number of rows in a each stripe and total number of rows
            // may change in lower levels of nesting
            chunk.start_row =
              (level == 0) ? stripe_start_row
                           : _col_meta.child_start_row[stripe_idx * num_columns + col_idx];
            chunk.num_rows =
              (level == 0)
                ? stripe_info->numberOfRows
                : _col_meta.num_child_rows_per_stripe[stripe_idx * num_columns + col_idx];
            chunk.column_num_rows = (level == 0) ? num_rows : _col_meta.num_child_rows[col_idx];
            chunk.parent_validity_info =
              (level == 0) ? column_validity_info{} : _col_meta.parent_column_data[col_idx];
            chunk.parent_null_count_prefix_sums =
              (level == 0)
                ? nullptr
                : null_count_prefix_sums[level - 1][_col_meta.parent_column_index[col_idx]]
                    .data();
            chunk.encoding_kind = stripe_footer->columns[columns_level[col_idx].id].kind;
            chunk.type_kind     = _metadata.per_file_metadata[stripe_source_mapping.source_idx]
                                .ff.types[columns_level[col_idx].id]
                                .kind;
            // num_child_rows for a struct column will be same, for other nested types it will be
            // calculated.
            chunk.num_child_rows = (chunk.type_kind != orc::STRUCT) ? 0 : chunk.num_rows;
            chunk.dtype_id       = column_types[col_idx].id();
            chunk.decimal_scale  = _metadata.per_file_metadata[stripe_source_mapping.source_idx]
                                    .ff.types[columns_level[col_idx].id]
                                    .scale.value_or(0);

            chunk.rowgroup_id = rowgroup_id;
            // Element width of the decoded data: index pairs for strings, offsets for nested.
            chunk.dtype_len   = (column_types[col_idx].id() == type_id::STRING)
                                ? sizeof(string_index_pair)
                                : ((column_types[col_idx].id() == type_id::LIST) or
                                   (column_types[col_idx].id() == type_id::STRUCT))
                                  ? sizeof(size_type)
                                  : cudf::size_of(column_types[col_idx]);
            chunk.num_rowgroups = stripe_num_rowgroups;
            if (chunk.type_kind == orc::TIMESTAMP) { chunk.timestamp_type_id = _timestamp_type.id(); }
            if (not is_data_empty) {
              for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
                chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
              }
            }
          }
          stripe_start_row += num_rows_per_stripe;
          num_rowgroups += stripe_num_rowgroups;

          stripe_idx++;
        }
      }
      // Wait for all async device reads and verify the byte counts.
      for (auto& task : read_tasks) {
        CUDF_EXPECTS(task.first.get() == task.second, "Unexpected discrepancy in bytes read.");
      }

      // Process dataset chunk pages into output columns
      if (stripe_data.size() != 0) {
        auto row_groups =
          cudf::detail::hostdevice_2dvector<gpu::RowGroup>(num_rowgroups, num_columns, stream);
        if (level > 0 and row_groups.size().first) {
          cudf::host_span<gpu::RowGroup> row_groups_span(row_groups.base_host_ptr(),
                                                         num_rowgroups * num_columns);
          auto& rw_grp_meta = _col_meta.rwgrp_meta;

          // Update start row and num rows per row group
          std::transform(rw_grp_meta.begin(),
                         rw_grp_meta.end(),
                         row_groups_span.begin(),
                         rw_grp_meta.begin(),
                         [&](auto meta, auto& row_grp) {
                           row_grp.num_rows  = meta.num_rows;
                           row_grp.start_row = meta.start_row;
                           return meta;
                         });
        }
        // Setup row group descriptors if using indexes
        if (_metadata.per_file_metadata[0].ps.compression != orc::NONE and not is_data_empty) {
          auto decomp_data = decompress_stripe_data(chunks,
                                                    stripe_data,
                                                    _metadata.per_file_metadata[0].decompressor.get(),
                                                    stream_info,
                                                    total_num_stripes,
                                                    row_groups,
                                                    _metadata.get_row_index_stride(),
                                                    level == 0,
                                                    stream);
          // Replace the raw (compressed) stripe buffers with the decompressed blob.
          stripe_data.clear();
          stripe_data.push_back(std::move(decomp_data));
        } else {
          if (row_groups.size().first) {
            chunks.host_to_device(stream);
            row_groups.host_to_device(stream);
            gpu::ParseRowGroupIndex(row_groups.base_device_ptr(),
                                    nullptr,
                                    chunks.base_device_ptr(),
                                    num_columns,
                                    total_num_stripes,
                                    num_rowgroups,
                                    _metadata.get_row_index_stride(),
                                    level == 0,
                                    stream);
          }
        }

        for (size_t i = 0; i < column_types.size(); ++i) {
          // A column is nullable if any stripe carries a PRESENT stream for it.
          bool is_nullable = false;
          for (size_t j = 0; j < total_num_stripes; ++j) {
            if (chunks[j][i].strm_len[gpu::CI_PRESENT] != 0) {
              is_nullable = true;
              break;
            }
          }
          auto is_list_type = (column_types[i].id() == type_id::LIST);
          auto n_rows       = (level == 0) ? num_rows : _col_meta.num_child_rows[i];
          // For list column, offset column will be always size + 1
          if (is_list_type) n_rows++;
          out_buffers[level].emplace_back(column_types[i], n_rows, is_nullable, stream, _mr);
        }

        if (not is_data_empty) {
          decode_stream_data(chunks,
                             num_dict_entries,
                             skip_rows,
                             tz_table.view(),
                             row_groups,
                             _metadata.get_row_index_stride(),
                             out_buffers[level],
                             level,
                             stream);
        }

        // Extract information to process nested child columns
        if (nested_col.size()) {
          if (not is_data_empty) { scan_null_counts(chunks, null_count_prefix_sums[level], stream); }
          row_groups.device_to_host(stream, true);
          aggregate_child_meta(chunks, row_groups, out_buffers[level], nested_col, level);
        }

        // ORC stores number of elements at each row, so we need to generate offsets from that
        if (nested_col.size()) {
          std::vector<list_buffer_data> buff_data;
          std::for_each(
            out_buffers[level].begin(), out_buffers[level].end(), [&buff_data](auto& out_buffer) {
              if (out_buffer.type.id() == type_id::LIST) {
                auto data = static_cast<size_type*>(out_buffer.data());
                buff_data.emplace_back(list_buffer_data{data, out_buffer.size});
              }
            });

          if (buff_data.size()) {
            auto const dev_buff_data = cudf::detail::make_device_uvector_async(buff_data, stream);
            generate_offsets_for_list(dev_buff_data, stream);
          }
        }
      }
    }
  }

  // If out_columns is empty, then create columns from buffer.
if (out_columns.empty()) {
    create_columns(std::move(out_buffers), out_columns, schema_info, stream);
  }

  // Return column names (must match order of returned columns)
  out_metadata.column_names.reserve(schema_info.size());
  std::transform(schema_info.cbegin(),
                 schema_info.cend(),
                 std::back_inserter(out_metadata.column_names),
                 [](auto info) { return info.name; });

  out_metadata.schema_info = std::move(schema_info);

  // Merge user metadata from every input file into the output metadata.
  for (const auto& meta : _metadata.per_file_metadata) {
    for (const auto& kv : meta.ff.metadata) {
      out_metadata.user_data.insert({kv.name, kv.value});
    }
  }

  return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}

// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>>&& sources,
               orc_reader_options const& options,
               rmm::cuda_stream_view stream,
               rmm::mr::device_memory_resource* mr)
{
  _impl = std::make_unique<impl>(std::move(sources), options, mr);
}

// Destructor within this translation unit
reader::~reader() = default;

// Forward to implementation
table_with_metadata reader::read(orc_reader_options const& options, rmm::cuda_stream_view stream)
{
  return _impl->read(options.get_skip_rows(), options.get_num_rows(), options.get_stripes(), stream);
}

}  // namespace orc
}  // namespace detail
}  // namespace io
}  // namespace cudf
bf7b154a338dd3bcf91d3c0851a9be84119a2498.cu
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file reader_impl.cu
 * @brief cuDF-IO ORC reader class implementation
 */

#include "orc.h"
#include "orc_gpu.h"
#include "reader_impl.hpp"
#include "timezone.cuh"

#include <io/comp/gpuinflate.h>
#include <io/utilities/config_utils.hpp>
#include <io/utilities/time_utils.cuh>

#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

#include <nvcomp/snappy.h>

#include <algorithm>
#include <iterator>

namespace cudf {
namespace io {
namespace detail {
namespace orc {
using namespace cudf::io::orc;

namespace {
/**
 * @brief Function that translates ORC data kind to cuDF type enum
 */
constexpr type_id to_type_id(const orc::SchemaType& schema,
                             bool use_np_dtypes,
                             type_id timestamp_type_id,
                             type_id decimal_type_id)
{
  switch (schema.kind) {
    case orc::BOOLEAN: return type_id::BOOL8;
    case orc::BYTE: return type_id::INT8;
    case orc::SHORT: return type_id::INT16;
    case orc::INT: return type_id::INT32;
    case orc::LONG: return type_id::INT64;
    case orc::FLOAT: return type_id::FLOAT32;
    case orc::DOUBLE: return type_id::FLOAT64;
    case orc::STRING:
    case orc::BINARY:
    case orc::VARCHAR:
    case orc::CHAR:
      // Variable-length types can all be mapped to STRING
      return type_id::STRING;
    case orc::TIMESTAMP:
      return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
                                                   : type_id::TIMESTAMP_NANOSECONDS;
    case orc::DATE:
      // There isn't a (DAYS -> np.dtype) mapping
      return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
    case orc::DECIMAL: return decimal_type_id;
    // Need to update once cuDF plans to support map type
    case orc::MAP:
    case orc::LIST: return type_id::LIST;
    case orc::STRUCT: return type_id::STRUCT;
    default: break;
  }

  return type_id::EMPTY;
}

/**
 * @brief Maps an ORC stream kind to the chunk stream slot (CI_*) it occupies, and updates
 * the skip_count used to track index-stream ordering.
 */
constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
  const orc::StreamKind kind, uint32_t skip_count, bool non_child)
{
  switch (kind) {
    case orc::DATA:
      skip_count += 1;
      skip_count |= (skip_count & 0xff) << 8;
      return std::make_pair(gpu::CI_DATA, skip_count);
    case orc::LENGTH:
    case orc::SECONDARY:
      skip_count += 1;
      skip_count |= (skip_count & 0xff) << 16;
      return std::make_pair(gpu::CI_DATA2, skip_count);
    case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count);
    case orc::PRESENT:
      skip_count += (non_child ? 1 : 0);
      return std::make_pair(gpu::CI_PRESENT, skip_count);
    case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count);
    default:
      // Skip this stream as it's not strictly required
      return std::make_pair(gpu::CI_NUM_STREAMS, 0);
  }
}

/**
 * @brief struct to store buffer data and size of list buffer
 */
struct list_buffer_data {
  size_type* data;  // device pointer to the list column's offsets buffer
  size_type size;   // number of elements in that buffer
};

// Generates offsets for list buffer from number of elements in a row.
void generate_offsets_for_list(rmm::device_uvector<list_buffer_data> const& buff_data,
                               rmm::cuda_stream_view stream)
{
  // In-place exclusive scan per list buffer turns per-row element counts into offsets;
  // thrust::seq runs the scan sequentially within each device-side invocation.
  auto transformer = [] __device__(list_buffer_data list_data) {
    thrust::exclusive_scan(
      thrust::seq, list_data.data, list_data.data + list_data.size, list_data.data);
  };
  thrust::for_each(rmm::exec_policy(stream), buff_data.begin(), buff_data.end(), transformer);
  stream.synchronize();
}

/**
 * @brief Struct that maps ORC streams to columns
 */
struct orc_stream_info {
  orc_stream_info() = default;
  explicit orc_stream_info(
    uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_)
    : offset(offset_),
      dst_pos(dst_pos_),
      length(length_),
      gdf_idx(gdf_idx_),
      stripe_idx(stripe_idx_)
  {
  }
  uint64_t offset;      // offset in file
  size_t dst_pos;       // offset in memory relative to start of compressed stripe data
  size_t length;        // length in file
  uint32_t gdf_idx;     // column index
  uint32_t stripe_idx;  // stripe index
};

/**
 * @brief Function that populates column descriptors stream/chunk
 *
 * Walks the stripe footer's streams, records their file offsets/lengths in stream_info,
 * and fills in the per-chunk stream slots; returns total bytes of stream data gathered.
 */
size_t gather_stream_info(const size_t stripe_index,
                          const orc::StripeInformation* stripeinfo,
                          const orc::StripeFooter* stripefooter,
                          const std::vector<int>& orc2gdf,
                          const std::vector<orc::SchemaType> types,
                          bool use_index,
                          size_t* num_dictionary_entries,
                          cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
                          std::vector<orc_stream_info>& stream_info,
                          bool apply_struct_map)
{
  uint64_t src_offset = 0;
  uint64_t dst_offset = 0;
  for (const auto& stream : stripefooter->streams) {
    // Streams without a (mappable) column id are skipped but still advance dst_offset.
    if (!stream.column_id || *stream.column_id >= orc2gdf.size()) {
      dst_offset += stream.length;
      continue;
    }

    auto const column_id = *stream.column_id;
    auto col             = orc2gdf[column_id];

    if (col == -1 and apply_struct_map) {
      // A struct-type column has no data itself, but rather child columns
      // for each of its fields. There is only a PRESENT stream, which
      // needs to be included for the reader.
      const auto schema_type = types[column_id];
      if (schema_type.subtypes.size() != 0) {
        if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
          for (const auto& idx : schema_type.subtypes) {
            auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
            if (child_idx >= 0) {
              col = child_idx;
              auto& chunk                     = chunks[stripe_index][col];
              chunk.strm_id[gpu::CI_PRESENT]  = stream_info.size();
              chunk.strm_len[gpu::CI_PRESENT] = stream.length;
            }
          }
        }
      }
    }
    if (col != -1) {
      if (src_offset >= stripeinfo->indexLength || use_index) {
        // NOTE: skip_count field is temporarily used to track index ordering
        auto& chunk = chunks[stripe_index][col];
        const auto idx =
          get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[column_id]);
        if (idx.first < gpu::CI_NUM_STREAMS) {
          chunk.strm_id[idx.first]  = stream_info.size();
          chunk.strm_len[idx.first] = stream.length;
          chunk.skip_count          = idx.second;

          if (idx.first == gpu::CI_DICTIONARY) {
            chunk.dictionary_start = *num_dictionary_entries;
            chunk.dict_len         = stripefooter->columns[column_id].dictionarySize;
            *num_dictionary_entries += stripefooter->columns[column_id].dictionarySize;
          }
        }
      }
      stream_info.emplace_back(
        stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index);
      dst_offset += stream.length;
    }
    src_offset += stream.length;
  }

  return dst_offset;
}

/**
 * @brief Determines cuDF type of an ORC Decimal column.
*/
auto decimal_column_type(const std::vector<std::string>& float64_columns,
                         const std::vector<std::string>& decimal128_columns,
                         cudf::io::orc::metadata& metadata,
                         int column_index)
{
  auto const& column_path = metadata.column_path(column_index);
  // True when the column's full path appears in the given user-supplied list
  auto is_column_in = [&](const std::vector<std::string>& cols) {
    return std::find(cols.cbegin(), cols.cend(), column_path) != cols.end();
  };

  auto const user_selected_float64    = is_column_in(float64_columns);
  auto const user_selected_decimal128 = is_column_in(decimal128_columns);
  // The two user overrides are mutually exclusive for any given column
  CUDF_EXPECTS(not user_selected_float64 or not user_selected_decimal128,
               "Both decimal128 and float64 types selected for column " + column_path);

  if (user_selected_float64) return type_id::FLOAT64;
  if (user_selected_decimal128) return type_id::DECIMAL128;
  return type_id::DECIMAL64;  // default when the user expressed no preference
}

}  // namespace

/**
 * @brief Decompresses a batch of snappy-compressed blocks via nvCOMP.
 *
 * @param comp_in Per-block source/destination pointers and sizes
 * @param comp_stat Per-block status output; bytes_written is filled in here
 * @param max_uncomp_page_size Largest uncompressed block size, used to size scratch
 * @param stream CUDA stream used for device memory operations and kernel launches
 */
void snappy_decompress(device_span<gpu_inflate_input_s> comp_in,
                       device_span<gpu_inflate_status_s> comp_stat,
                       size_t max_uncomp_page_size,
                       rmm::cuda_stream_view stream)
{
  size_t num_blocks = comp_in.size();
  size_t temp_size;

  auto status =
    nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_page_size, &temp_size);
  CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status,
               "Unable to get scratch size for snappy decompression");

  rmm::device_buffer scratch(temp_size, stream);
  rmm::device_uvector<void const*> compressed_data_ptrs(num_blocks, stream);
  rmm::device_uvector<size_t> compressed_data_sizes(num_blocks, stream);
  rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream);
  rmm::device_uvector<size_t> uncompressed_data_sizes(num_blocks, stream);

  rmm::device_uvector<size_t> actual_uncompressed_data_sizes(num_blocks, stream);
  rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream);

  // Prepare the vectors
  // NOTE(review): the last zip component uses .data() while the others use
  // .begin() — appears equivalent for device_uvector; confirm intentional.
  auto comp_it = thrust::make_zip_iterator(compressed_data_ptrs.begin(),
                                           compressed_data_sizes.begin(),
                                           uncompressed_data_ptrs.begin(),
                                           uncompressed_data_sizes.data());
  thrust::transform(rmm::exec_policy(stream),
                    comp_in.begin(),
comp_in.end(),
                    comp_it,
                    [] __device__(gpu_inflate_input_s in) {
                      return thrust::make_tuple(in.srcDevice, in.srcSize, in.dstDevice, in.dstSize);
                    });

  status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.data(),
                                              compressed_data_sizes.data(),
                                              uncompressed_data_sizes.data(),
                                              actual_uncompressed_data_sizes.data(),
                                              num_blocks,
                                              scratch.data(),
                                              scratch.size(),
                                              uncompressed_data_ptrs.data(),
                                              statuses.data(),
                                              stream.value());
  CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status, "unable to perform snappy decompression");

  // Verify that every block decompressed successfully
  CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
                             statuses.begin(),
                             statuses.end(),
                             thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)),
               "Error during snappy decompression");

  // Publish the actual number of bytes written for each block
  thrust::for_each_n(
    rmm::exec_policy(stream),
    thrust::make_counting_iterator(0),
    num_blocks,
    [=, actual_uncomp_sizes = actual_uncompressed_data_sizes.data()] __device__(auto i) {
      comp_stat[i].bytes_written = actual_uncomp_sizes[i];
      comp_stat[i].status        = 0;
    });
}

/**
 * @brief Decompresses the stripes' stream data.
 *
 * @return Device buffer owning all uncompressed stream data; `chunks` and
 * `stream_info` are updated to point into it.
 */
rmm::device_buffer reader::impl::decompress_stripe_data(
  cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
  const std::vector<rmm::device_buffer>& stripe_data,
  const OrcDecompressor* decompressor,
  std::vector<orc_stream_info>& stream_info,
  size_t num_stripes,
  cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups,
  size_t row_index_stride,
  bool use_base_stride,
  rmm::cuda_stream_view stream)
{
  // Parse the columns' compressed info
  hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream);
  for (const auto& info : stream_info) {
    compinfo.insert(gpu::CompressedStreamInfo(
      static_cast<const uint8_t*>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
      info.length));
  }
  compinfo.host_to_device(stream);
  gpu::ParseCompressedStripeData(compinfo.device_ptr(),
                                 compinfo.size(),
                                 decompressor->GetBlockSize(),
                                 decompressor->GetLog2MaxCompressionRatio(),
                                 stream);
  compinfo.device_to_host(stream, true);

  // Count the exact number of compressed blocks
  size_t num_compressed_blocks   = 0;
  size_t num_uncompressed_blocks = 0;
  size_t total_decomp_size       = 0;
  for (size_t i = 0; i < compinfo.size(); ++i) {
    num_compressed_blocks += compinfo[i].num_compressed_blocks;
    num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
    total_decomp_size += compinfo[i].max_uncompressed_size;
  }
  CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found");

  rmm::device_buffer decomp_data(total_decomp_size, stream);
  // Uncompressed (pass-through) blocks are placed after the compressed ones
  rmm::device_uvector<gpu_inflate_input_s> inflate_in(
    num_compressed_blocks + num_uncompressed_blocks, stream);
  rmm::device_uvector<gpu_inflate_status_s> inflate_out(num_compressed_blocks, stream);

  // Parse again to populate the decompression input/output buffers
  size_t decomp_offset           = 0;
  uint32_t max_uncomp_block_size = 0;
  uint32_t start_pos             = 0;
  uint32_t start_pos_uncomp      = (uint32_t)num_compressed_blocks;
  for (size_t i = 0; i < compinfo.size(); ++i) {
    auto dst_base                 = static_cast<uint8_t*>(decomp_data.data());
    compinfo[i].uncompressed_data = dst_base + decomp_offset;
    compinfo[i].decctl            = inflate_in.data() + start_pos;
    compinfo[i].decstatus         = inflate_out.data() + start_pos;
    compinfo[i].copyctl           = inflate_in.data() + start_pos_uncomp;

    stream_info[i].dst_pos = decomp_offset;
    decomp_offset += compinfo[i].max_uncompressed_size;
    start_pos += compinfo[i].num_compressed_blocks;
    start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
    max_uncomp_block_size =
      std::max(max_uncomp_block_size, compinfo[i].max_uncompressed_block_size);
  }
  compinfo.host_to_device(stream);
  gpu::ParseCompressedStripeData(compinfo.device_ptr(),
                                 compinfo.size(),
                                 decompressor->GetBlockSize(),
                                 decompressor->GetLog2MaxCompressionRatio(),
                                 stream);

  // Dispatch batches of blocks to decompress
  if (num_compressed_blocks > 0) {
    switch (decompressor->GetKind()) {
      case orc::ZLIB:
        CUDA_TRY(
          gpuinflate(inflate_in.data(), inflate_out.data(), num_compressed_blocks, 0, stream));
        break;
      case orc::SNAPPY:
        if (nvcomp_integration::is_stable_enabled()) {
          // Prefer the nvCOMP path when enabled; fall back to the built-in kernel
          device_span<gpu_inflate_input_s> inflate_in_view{inflate_in.data(),
                                                           num_compressed_blocks};
          device_span<gpu_inflate_status_s> inflate_out_view{inflate_out.data(),
                                                             num_compressed_blocks};
          snappy_decompress(inflate_in_view, inflate_out_view, max_uncomp_block_size, stream);
        } else {
          CUDA_TRY(
            gpu_unsnap(inflate_in.data(), inflate_out.data(), num_compressed_blocks, stream));
        }
        break;
      default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
    }
  }
  if (num_uncompressed_blocks > 0) {
    CUDA_TRY(gpu_copy_uncompressed_blocks(
      inflate_in.data() + num_compressed_blocks, num_uncompressed_blocks, stream));
  }
  gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream);

  // Update the stream information with the updated uncompressed info
  // TBD: We could update the value from the information we already
  // have in stream_info[], but using the gpu results also updates
  // max_uncompressed_size to the actual uncompressed size, or zero if
  // decompression failed.
  compinfo.device_to_host(stream, true);

  const size_t num_columns = chunks.size().second;

  // Repoint every chunk's streams at the decompressed data
  for (size_t i = 0; i < num_stripes; ++i) {
    for (size_t j = 0; j < num_columns; ++j) {
      auto& chunk = chunks[i][j];
      for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
        if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
          chunk.streams[k]  = compinfo[chunk.strm_id[k]].uncompressed_data;
          chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
        }
      }
    }
  }

  if (row_groups.size().first) {
    chunks.host_to_device(stream);
    row_groups.host_to_device(stream);
    gpu::ParseRowGroupIndex(row_groups.base_device_ptr(),
                            compinfo.device_ptr(),
                            chunks.base_device_ptr(),
                            num_columns,
                            num_stripes,
                            row_groups.size().first,
                            row_index_stride,
                            use_base_stride,
                            stream);
  }

  return decomp_data;
}

/**
 * @brief Updates null mask of columns whose parent is a struct column.
 * If struct column has null element, that row would be
 * skipped while writing child column in ORC, so we need to insert the missing null
 * elements in child column.
* There is another behavior from pyspark, where if the child column doesn't have any null
 * elements, it will not have present stream, so in that case parent null mask need to be
 * copied to child column.
 *
 * @param chunks Vector of list of column chunk descriptors
 * @param out_buffers Output columns' device buffers
 * @param stream CUDA stream used for device memory operations and kernel launches.
 * @param mr Device memory resource to use for device memory allocation
 */
void update_null_mask(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
                      std::vector<column_buffer>& out_buffers,
                      rmm::cuda_stream_view stream,
                      rmm::mr::device_memory_resource* mr)
{
  const auto num_stripes = chunks.size().first;
  const auto num_columns = chunks.size().second;
  bool is_mask_updated   = false;

  for (size_t col_idx = 0; col_idx < num_columns; ++col_idx) {
    if (chunks[0][col_idx].parent_validity_info.valid_map_base != nullptr) {
      if (not is_mask_updated) {
        // Lazily sync chunk descriptors to the host, once, on first need
        chunks.device_to_host(stream, true);
        is_mask_updated = true;
      }

      auto parent_valid_map_base = chunks[0][col_idx].parent_validity_info.valid_map_base;
      auto child_valid_map_base  = out_buffers[col_idx].null_mask();
      // Child rows exist only where the parent is valid
      auto child_mask_len =
        chunks[0][col_idx].column_num_rows - chunks[0][col_idx].parent_validity_info.null_count;
      auto parent_mask_len = chunks[0][col_idx].column_num_rows;

      if (child_valid_map_base != nullptr) {
        rmm::device_uvector<uint32_t> dst_idx(child_mask_len, stream);
        // Copy indexes at which the parent has valid value.
        thrust::copy_if(rmm::exec_policy(stream),
                        thrust::make_counting_iterator(0),
                        thrust::make_counting_iterator(0) + parent_mask_len,
                        dst_idx.begin(),
                        [parent_valid_map_base] __device__(auto idx) {
                          return bit_is_set(parent_valid_map_base, idx);
                        });

        // Start from an all-null mask of parent length, then set bits where
        // the child is valid, scattered to the parent's valid positions
        auto merged_null_mask = cudf::detail::create_null_mask(
          parent_mask_len, mask_state::ALL_NULL, rmm::cuda_stream_view(stream), mr);
        auto merged_mask      = static_cast<bitmask_type*>(merged_null_mask.data());
        uint32_t* dst_idx_ptr = dst_idx.data();
        // Copy child valid bits from child column to valid indexes, this will merge both child
        // and parent null masks
        thrust::for_each(rmm::exec_policy(stream),
                         thrust::make_counting_iterator(0),
                         thrust::make_counting_iterator(0) + dst_idx.size(),
                         [child_valid_map_base, dst_idx_ptr, merged_mask] __device__(auto idx) {
                           if (bit_is_set(child_valid_map_base, idx)) {
                             cudf::set_bit(merged_mask, dst_idx_ptr[idx]);
                           };
                         });

        out_buffers[col_idx]._null_mask = std::move(merged_null_mask);

      } else {
        // Since child column doesn't have a mask, copy parent null mask
        auto mask_size = bitmask_allocation_size_bytes(parent_mask_len);
        out_buffers[col_idx]._null_mask =
          rmm::device_buffer(static_cast<void*>(parent_valid_map_base), mask_size, stream, mr);
      }
    }
  }

  thrust::counting_iterator<int> col_idx_it(0);
  thrust::counting_iterator<int> stripe_idx_it(0);

  if (is_mask_updated) {
    // Update chunks with pointers to column data which might have been changed.
    std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) {
      std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) {
        auto& chunk          = chunks[stripe_idx][col_idx];
        chunk.valid_map_base = out_buffers[col_idx].null_mask();
      });
    });
    chunks.host_to_device(stream, true);
  }
}

/**
 * @brief Compute the per-stripe prefix sum of null count, for each struct column in the current
 * layer.
*/
void scan_null_counts(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> const& chunks,
                      cudf::host_span<rmm::device_uvector<uint32_t>> prefix_sums,
                      rmm::cuda_stream_view stream)
{
  auto const num_stripes = chunks.size().first;
  if (num_stripes == 0) return;

  auto const num_columns = chunks.size().second;
  std::vector<thrust::pair<size_type, cudf::device_span<uint32_t>>> prefix_sums_to_update;
  for (auto col_idx = 0ul; col_idx < num_columns; ++col_idx) {
    // Null counts sums are only needed for children of struct columns
    if (chunks[0][col_idx].type_kind == STRUCT) {
      prefix_sums_to_update.emplace_back(col_idx, prefix_sums[col_idx]);
    }
  }
  auto const d_prefix_sums_to_update =
    cudf::detail::make_device_uvector_async(prefix_sums_to_update, stream);

  // For each selected column: gather per-stripe null counts, then scan them
  thrust::for_each(rmm::exec_policy(stream),
                   d_prefix_sums_to_update.begin(),
                   d_prefix_sums_to_update.end(),
                   [chunks = cudf::detail::device_2dspan<gpu::ColumnDesc const>{chunks}] __device__(
                     auto const& idx_psums) {
                     auto const col_idx = idx_psums.first;
                     auto const psums   = idx_psums.second;

                     thrust::transform(
                       thrust::seq,
                       thrust::make_counting_iterator(0),
                       thrust::make_counting_iterator(0) + psums.size(),
                       psums.begin(),
                       [&](auto stripe_idx) { return chunks[stripe_idx][col_idx].null_count; });

                     thrust::inclusive_scan(thrust::seq, psums.begin(), psums.end(), psums.begin());
                   });
  // `prefix_sums_to_update` goes out of scope, copy has to be done before we return
  stream.synchronize();
}

/**
 * @brief Decodes the streams of all stripes into the output column buffers,
 * then updates each buffer's total null count from the per-stripe results.
 */
void reader::impl::decode_stream_data(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
                                      size_t num_dicts,
                                      size_t skip_rows,
                                      timezone_table_view tz_table,
                                      cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups,
                                      size_t row_index_stride,
                                      std::vector<column_buffer>& out_buffers,
                                      size_t level,
                                      rmm::cuda_stream_view stream)
{
  const auto num_stripes = chunks.size().first;
  const auto num_columns = chunks.size().second;
  thrust::counting_iterator<int> col_idx_it(0);
  thrust::counting_iterator<int> stripe_idx_it(0);

  // Update chunks with pointers to column data
  std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) {
    std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) {
      auto& chunk            = chunks[stripe_idx][col_idx];
      chunk.column_data_base = out_buffers[col_idx].data();
      chunk.valid_map_base   = out_buffers[col_idx].null_mask();
    });
  });

  // Allocate global dictionary for deserializing
  rmm::device_uvector<gpu::DictionaryEntry> global_dict(num_dicts, stream);

  chunks.host_to_device(stream, true);
  gpu::DecodeNullsAndStringDictionaries(
    chunks.base_device_ptr(), global_dict.data(), num_columns, num_stripes, skip_rows, stream);

  if (level > 0) {
    // Update nullmasks for children if parent was a struct and had null mask
    update_null_mask(chunks, out_buffers, stream, _mr);
  }

  // Update the null map for child columns
  gpu::DecodeOrcColumnData(chunks.base_device_ptr(),
                           global_dict.data(),
                           row_groups,
                           num_columns,
                           num_stripes,
                           skip_rows,
                           tz_table,
                           row_groups.size().first,
                           row_index_stride,
                           level,
                           stream);
  chunks.device_to_host(stream, true);

  // Sum the per-stripe null counts into each output buffer's total
  std::for_each(col_idx_it + 0, col_idx_it + num_columns, [&](auto col_idx) {
    out_buffers[col_idx].null_count() =
      std::accumulate(stripe_idx_it + 0,
                      stripe_idx_it + num_stripes,
                      0,
                      [&](auto null_count, auto const stripe_idx) {
                        return null_count + chunks[stripe_idx][col_idx].null_count;
                      });
  });
}

// Aggregate child column metadata per stripe and per column
void reader::impl::aggregate_child_meta(cudf::detail::host_2dspan<gpu::ColumnDesc> chunks,
                                        cudf::detail::host_2dspan<gpu::RowGroup> row_groups,
                                        std::vector<column_buffer>& out_buffers,
                                        std::vector<orc_column_meta> const& list_col,
                                        const size_type level)
{
  const auto num_of_stripes         = chunks.size().first;
  const auto num_of_rowgroups       = row_groups.size().first;
  const auto num_parent_cols        = selected_columns.levels[level].size();
  const auto num_child_cols         = selected_columns.levels[level + 1].size();
  const auto number_of_child_chunks = num_child_cols * num_of_stripes;
  auto& num_child_rows =
_col_meta.num_child_rows;
  auto& parent_column_data = _col_meta.parent_column_data;

  // Reset the meta to store child column details.
  num_child_rows.resize(selected_columns.levels[level + 1].size());
  std::fill(num_child_rows.begin(), num_child_rows.end(), 0);
  parent_column_data.resize(number_of_child_chunks);
  _col_meta.parent_column_index.resize(number_of_child_chunks);
  _col_meta.child_start_row.resize(number_of_child_chunks);
  _col_meta.num_child_rows_per_stripe.resize(number_of_child_chunks);
  _col_meta.rwgrp_meta.resize(num_of_rowgroups * num_child_cols);

  // 2D views over the flat per-(stripe, child column) bookkeeping vectors
  auto child_start_row = cudf::detail::host_2dspan<uint32_t>(
    _col_meta.child_start_row.data(), num_of_stripes, num_child_cols);
  auto num_child_rows_per_stripe = cudf::detail::host_2dspan<uint32_t>(
    _col_meta.num_child_rows_per_stripe.data(), num_of_stripes, num_child_cols);
  auto rwgrp_meta = cudf::detail::host_2dspan<reader_column_meta::row_group_meta>(
    _col_meta.rwgrp_meta.data(), num_of_rowgroups, num_child_cols);

  int index = 0;  // number of child column processed

  // For each parent column, update its child column meta for each stripe.
  std::for_each(list_col.cbegin(), list_col.cend(), [&](const auto p_col) {
    const auto parent_col_idx = _col_meta.orc_col_map[level][p_col.id];
    auto start_row            = 0;
    auto processed_row_groups = 0;

    for (size_t stripe_id = 0; stripe_id < num_of_stripes; stripe_id++) {
      // Aggregate num_rows and start_row from processed parent columns per row groups
      if (num_of_rowgroups) {
        auto stripe_num_row_groups = chunks[stripe_id][parent_col_idx].num_rowgroups;
        auto processed_child_rows  = 0;

        for (size_t rowgroup_id = 0; rowgroup_id < stripe_num_row_groups;
             rowgroup_id++, processed_row_groups++) {
          const auto child_rows = row_groups[processed_row_groups][parent_col_idx].num_child_rows;
          for (size_type id = 0; id < p_col.num_children; id++) {
            const auto child_col_idx                                  = index + id;
            rwgrp_meta[processed_row_groups][child_col_idx].start_row = processed_child_rows;
            rwgrp_meta[processed_row_groups][child_col_idx].num_rows  = child_rows;
          }
          processed_child_rows += child_rows;
        }
      }

      // Aggregate start row, number of rows per chunk and total number of rows in a column
      const auto child_rows = chunks[stripe_id][parent_col_idx].num_child_rows;
      for (size_type id = 0; id < p_col.num_children; id++) {
        const auto child_col_idx = index + id;

        num_child_rows[child_col_idx] += child_rows;
        num_child_rows_per_stripe[stripe_id][child_col_idx] = child_rows;
        // start row could be different for each column when there is nesting at each stripe level
        child_start_row[stripe_id][child_col_idx] = (stripe_id == 0) ? 0 : start_row;
      }
      start_row += child_rows;
    }

    // Parent column null mask and null count would be required for child column
    // to adjust its nullmask.
    auto type              = out_buffers[parent_col_idx].type.id();
    auto parent_null_count = static_cast<uint32_t>(out_buffers[parent_col_idx].null_count());
    auto parent_valid_map  = out_buffers[parent_col_idx].null_mask();
    auto num_rows          = out_buffers[parent_col_idx].size;

    for (size_type id = 0; id < p_col.num_children; id++) {
      const auto child_col_idx                     = index + id;
      _col_meta.parent_column_index[child_col_idx] = parent_col_idx;
      if (type == type_id::STRUCT) {
        parent_column_data[child_col_idx] = {parent_valid_map, parent_null_count};
        // Number of rows in child will remain same as parent in case of struct column
        num_child_rows[child_col_idx] = num_rows;
      } else {
        parent_column_data[child_col_idx] = {nullptr, 0};
      }
    }
    index += p_col.num_children;
  });
}

// Name of a MAP's synthesized struct children: index 0 -> "key", else "value"
std::string get_map_child_col_name(size_t const idx) { return (idx == 0) ? "key" : "value"; }

/**
 * @brief Creates an empty cudf column matching the ORC schema of `orc_col_id`,
 * recursing into LIST/MAP/STRUCT children, and fills in `schema_info` names.
 */
std::unique_ptr<column> reader::impl::create_empty_column(const size_type orc_col_id,
                                                          column_name_info& schema_info,
                                                          rmm::cuda_stream_view stream)
{
  schema_info.name = _metadata.column_name(0, orc_col_id);
  // Resolve the cudf type, honoring numpy-compat / timestamp / decimal options
  auto const type = to_type_id(
    _metadata.get_schema(orc_col_id),
    _use_np_dtypes,
    _timestamp_type.id(),
    decimal_column_type(
      _decimal_cols_as_float, decimal128_columns, _metadata.per_file_metadata[0], orc_col_id));
  int32_t scale = 0;
  std::vector<std::unique_ptr<column>> child_columns;
  std::unique_ptr<column> out_col = nullptr;
  auto kind                       = _metadata.get_col_type(orc_col_id).kind;

  switch (kind) {
    case orc::LIST:
      schema_info.children.emplace_back("offsets");
      schema_info.children.emplace_back("");
      out_col = make_lists_column(
        0,
        make_empty_column(type_id::INT32),
        create_empty_column(
          _metadata.get_col_type(orc_col_id).subtypes[0], schema_info.children.back(), stream),
        0,
        rmm::device_buffer{0, stream},
        stream);
      break;
    case orc::MAP: {
      schema_info.children.emplace_back("offsets");
      schema_info.children.emplace_back("struct");
      const auto child_column_ids = _metadata.get_col_type(orc_col_id).subtypes;
      for (size_t idx = 0; idx <
_metadata.get_col_type(orc_col_id).subtypes.size(); idx++) {
        auto& children_schema = schema_info.children.back().children;
        children_schema.emplace_back("");
        child_columns.push_back(create_empty_column(
          child_column_ids[idx], schema_info.children.back().children.back(), stream));
        auto name                 = get_map_child_col_name(idx);
        children_schema[idx].name = name;
      }
      // Wrap the key/value children in a struct, then the struct in a list
      auto struct_col =
        make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream);
      out_col = make_lists_column(0,
                                  make_empty_column(type_id::INT32),
                                  std::move(struct_col),
                                  0,
                                  rmm::device_buffer{0, stream},
                                  stream);
    } break;

    case orc::STRUCT:
      for (const auto col : _metadata.get_col_type(orc_col_id).subtypes) {
        schema_info.children.emplace_back("");
        child_columns.push_back(create_empty_column(col, schema_info.children.back(), stream));
      }
      out_col =
        make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream);
      break;

    case orc::DECIMAL:
      if (type == type_id::DECIMAL64 or type == type_id::DECIMAL128) {
        // cuDF uses negative scale while ORC stores it as positive
        scale = -static_cast<int32_t>(_metadata.get_types()[orc_col_id].scale.value_or(0));
      }
      out_col = make_empty_column(data_type(type, scale));
      break;

    default: out_col = make_empty_column(type);
  }

  return out_col;
}

// Adds child column buffers to parent column
column_buffer&& reader::impl::assemble_buffer(const size_type orc_col_id,
                                              std::vector<std::vector<column_buffer>>& col_buffers,
                                              const size_t level,
                                              rmm::cuda_stream_view stream)
{
  auto const col_id = _col_meta.orc_col_map[level][orc_col_id];
  auto& col_buffer  = col_buffers[level][col_id];

  col_buffer.name = _metadata.column_name(0, orc_col_id);
  auto kind       = _metadata.get_col_type(orc_col_id).kind;
  switch (kind) {
    case orc::LIST:
    case orc::STRUCT:
      // Recursively attach the children from the next nesting level
      for (auto const& col : selected_columns.children[orc_col_id]) {
        col_buffer.children.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream));
      }
      break;
    case orc::MAP: {
      std::vector<column_buffer> child_col_buffers;
      // Get child buffers
      for (size_t idx = 0; idx < selected_columns.children[orc_col_id].size(); idx++) {
        auto name = get_map_child_col_name(idx);
        auto col  = selected_columns.children[orc_col_id][idx];
        child_col_buffers.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream));
        child_col_buffers.back().name = name;
      }
      // Create a struct buffer
      auto num_rows = child_col_buffers[0].size;
      auto struct_buffer =
        column_buffer(cudf::data_type(type_id::STRUCT), num_rows, false, stream, _mr);
      struct_buffer.children = std::move(child_col_buffers);
      struct_buffer.name     = "struct";
      col_buffer.children.emplace_back(std::move(struct_buffer));
    } break;

    default: break;
  }

  return std::move(col_buffer);
}

// creates columns along with schema information for each column
void reader::impl::create_columns(std::vector<std::vector<column_buffer>>&& col_buffers,
                                  std::vector<std::unique_ptr<column>>& out_columns,
                                  std::vector<column_name_info>& schema_info,
                                  rmm::cuda_stream_view stream)
{
  // One top-level output column per level-0 selected column
  std::transform(selected_columns.levels[0].begin(),
                 selected_columns.levels[0].end(),
                 std::back_inserter(out_columns),
                 [&](auto const col_meta) {
                   schema_info.emplace_back("");
                   auto col_buffer = assemble_buffer(col_meta.id, col_buffers, 0, stream);
                   return make_column(col_buffer, &schema_info.back(), stream, _mr);
                 });
}

reader::impl::impl(std::vector<std::unique_ptr<datasource>>&& sources,
                   orc_reader_options const& options,
                   rmm::mr::device_memory_resource* mr)
  : _mr(mr),
    _sources(std::move(sources)),
    _metadata{_sources},
    selected_columns{_metadata.select_columns(options.get_columns())}
{
  // Override output timestamp resolution if requested
  if (options.get_timestamp_type().id() != type_id::EMPTY) {
    _timestamp_type = options.get_timestamp_type();
  }

  // Enable or disable attempt to use row index for parsing
  _use_index = options.is_enabled_use_index();

  // Enable or disable the conversion to numpy-compatible dtypes
  _use_np_dtypes = options.is_enabled_use_np_dtypes();

  // Control decimals conversion
  _decimal_cols_as_float = options.get_decimal_cols_as_float();
decimal128_columns = options.get_decimal128_columns(); } timezone_table reader::impl::compute_timezone_table( const std::vector<cudf::io::orc::metadata::stripe_source_mapping>& selected_stripes, rmm::cuda_stream_view stream) { if (selected_stripes.empty()) return {}; auto const has_timestamp_column = std::any_of( selected_columns.levels.cbegin(), selected_columns.levels.cend(), [&](auto& col_lvl) { return std::any_of(col_lvl.cbegin(), col_lvl.cend(), [&](auto& col_meta) { return _metadata.get_col_type(col_meta.id).kind == TypeKind::TIMESTAMP; }); }); if (not has_timestamp_column) return {}; return build_timezone_transition_table(selected_stripes[0].stripe_info[0].second->writerTimezone, stream); } table_with_metadata reader::impl::read(size_type skip_rows, size_type num_rows, const std::vector<std::vector<size_type>>& stripes, rmm::cuda_stream_view stream) { // Selected columns at different levels of nesting are stored in different elements // of `selected_columns`; thus, size == 1 means no nested columns CUDF_EXPECTS(skip_rows == 0 or selected_columns.num_levels() == 1, "skip_rows is not supported by nested columns"); std::vector<std::unique_ptr<column>> out_columns; // buffer and stripe data are stored as per nesting level std::vector<std::vector<column_buffer>> out_buffers(selected_columns.num_levels()); std::vector<column_name_info> schema_info; std::vector<std::vector<rmm::device_buffer>> lvl_stripe_data(selected_columns.num_levels()); std::vector<std::vector<rmm::device_uvector<uint32_t>>> null_count_prefix_sums; table_metadata out_metadata; // There are no columns in the table if (selected_columns.num_levels() == 0) return {std::make_unique<table>(), std::move(out_metadata)}; // Select only stripes required (aka row groups) const auto selected_stripes = _metadata.select_stripes(stripes, skip_rows, num_rows); auto const tz_table = compute_timezone_table(selected_stripes, stream); // Iterates through levels of nested columns, child column will be one level 
down // compared to parent column. for (size_t level = 0; level < selected_columns.num_levels(); level++) { auto& columns_level = selected_columns.levels[level]; // Association between each ORC column and its cudf::column _col_meta.orc_col_map.emplace_back(_metadata.get_num_cols(), -1); std::vector<orc_column_meta> nested_col; bool is_data_empty = false; // Get a list of column data types std::vector<data_type> column_types; for (auto& col : columns_level) { auto col_type = to_type_id( _metadata.get_col_type(col.id), _use_np_dtypes, _timestamp_type.id(), decimal_column_type( _decimal_cols_as_float, decimal128_columns, _metadata.per_file_metadata[0], col.id)); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type"); CUDF_EXPECTS( (col_type != type_id::DECIMAL64) or (_metadata.get_col_type(col.id).precision <= 18), "Precision of column " + std::string{_metadata.column_name(0, col.id)} + " is over 18, use 128-bit Decimal."); if (col_type == type_id::DECIMAL64 or col_type == type_id::DECIMAL128) { // sign of the scale is changed since cuDF follows c++ libraries like CNL // which uses negative scaling, but liborc and other libraries // follow positive scaling. 
auto const scale = -static_cast<size_type>(_metadata.get_col_type(col.id).scale.value_or(0)); column_types.emplace_back(col_type, scale); } else { column_types.emplace_back(col_type); } // Map each ORC column to its column _col_meta.orc_col_map[level][col.id] = column_types.size() - 1; // TODO: Once MAP type is supported in cuDF, update this for MAP as well if (col_type == type_id::LIST or col_type == type_id::STRUCT) nested_col.emplace_back(col); } // If no rows or stripes to read, return empty columns if (num_rows <= 0 || selected_stripes.empty()) { std::transform(selected_columns.levels[0].begin(), selected_columns.levels[0].end(), std::back_inserter(out_columns), [&](auto const col_meta) { schema_info.emplace_back(""); return create_empty_column(col_meta.id, schema_info.back(), stream); }); break; } else { // Get the total number of stripes across all input files. size_t total_num_stripes = std::accumulate(selected_stripes.begin(), selected_stripes.end(), 0, [](size_t sum, auto& stripe_source_mapping) { return sum + stripe_source_mapping.stripe_info.size(); }); const auto num_columns = columns_level.size(); cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> chunks( total_num_stripes, num_columns, stream); memset(chunks.base_host_ptr(), 0, chunks.memory_size()); const bool use_index = (_use_index == true) && // Do stripes have row group index _metadata.is_row_grp_idx_present() && // Only use if we don't have much work with complete columns & stripes // TODO: Consider nrows, gpu, and tune the threshold (num_rows > _metadata.get_row_index_stride() && !(_metadata.get_row_index_stride() & 7) && _metadata.get_row_index_stride() > 0 && num_columns * total_num_stripes < 8 * 128) && // Only use if first row is aligned to a stripe boundary // TODO: Fix logic to handle unaligned rows (skip_rows == 0); // Logically view streams as columns std::vector<orc_stream_info> stream_info; null_count_prefix_sums.emplace_back(); 
null_count_prefix_sums.back().reserve(selected_columns.levels[level].size()); std::generate_n(std::back_inserter(null_count_prefix_sums.back()), selected_columns.levels[level].size(), [&]() { return cudf::detail::make_zeroed_device_uvector_async<uint32_t>( total_num_stripes, stream); }); // Tracker for eventually deallocating compressed and uncompressed data auto& stripe_data = lvl_stripe_data[level]; size_t stripe_start_row = 0; size_t num_dict_entries = 0; size_t num_rowgroups = 0; int stripe_idx = 0; std::vector<std::pair<std::future<size_t>, size_t>> read_tasks; for (auto const& stripe_source_mapping : selected_stripes) { // Iterate through the source files selected stripes for (auto const& stripe : stripe_source_mapping.stripe_info) { const auto stripe_info = stripe.first; const auto stripe_footer = stripe.second; auto stream_count = stream_info.size(); const auto total_data_size = gather_stream_info(stripe_idx, stripe_info, stripe_footer, _col_meta.orc_col_map[level], _metadata.get_types(), use_index, &num_dict_entries, chunks, stream_info, level == 0); if (total_data_size == 0) { CUDF_EXPECTS(stripe_info->indexLength == 0, "Invalid index rowgroup stream data"); // In case ROW GROUP INDEX is not present and all columns are structs with no null // stream, there is nothing to read at this level. 
auto fn_check_dtype = [](auto dtype) { return dtype.id() == type_id::STRUCT; }; CUDF_EXPECTS(std::all_of(column_types.begin(), column_types.end(), fn_check_dtype), "Expected streams data within stripe"); is_data_empty = true; } stripe_data.emplace_back(total_data_size, stream); auto dst_base = static_cast<uint8_t*>(stripe_data.back().data()); // Coalesce consecutive streams into one read while (not is_data_empty and stream_count < stream_info.size()) { const auto d_dst = dst_base + stream_info[stream_count].dst_pos; const auto offset = stream_info[stream_count].offset; auto len = stream_info[stream_count].length; stream_count++; while (stream_count < stream_info.size() && stream_info[stream_count].offset == offset + len) { len += stream_info[stream_count].length; stream_count++; } if (_metadata.per_file_metadata[stripe_source_mapping.source_idx] .source->is_device_read_preferred(len)) { read_tasks.push_back( std::make_pair(_metadata.per_file_metadata[stripe_source_mapping.source_idx] .source->device_read_async(offset, len, d_dst, stream), len)); } else { const auto buffer = _metadata.per_file_metadata[stripe_source_mapping.source_idx].source->host_read( offset, len); CUDF_EXPECTS(buffer->size() == len, "Unexpected discrepancy in bytes read."); CUDA_TRY(cudaMemcpyAsync( d_dst, buffer->data(), len, cudaMemcpyHostToDevice, stream.value())); stream.synchronize(); } } const auto num_rows_per_stripe = stripe_info->numberOfRows; const auto rowgroup_id = num_rowgroups; auto stripe_num_rowgroups = 0; if (use_index) { stripe_num_rowgroups = (num_rows_per_stripe + _metadata.get_row_index_stride() - 1) / _metadata.get_row_index_stride(); } // Update chunks to reference streams pointers for (size_t col_idx = 0; col_idx < num_columns; col_idx++) { auto& chunk = chunks[stripe_idx][col_idx]; // start row, number of rows in a each stripe and total number of rows // may change in lower levels of nesting chunk.start_row = (level == 0) ? 
stripe_start_row : _col_meta.child_start_row[stripe_idx * num_columns + col_idx]; chunk.num_rows = (level == 0) ? stripe_info->numberOfRows : _col_meta.num_child_rows_per_stripe[stripe_idx * num_columns + col_idx]; chunk.column_num_rows = (level == 0) ? num_rows : _col_meta.num_child_rows[col_idx]; chunk.parent_validity_info = (level == 0) ? column_validity_info{} : _col_meta.parent_column_data[col_idx]; chunk.parent_null_count_prefix_sums = (level == 0) ? nullptr : null_count_prefix_sums[level - 1][_col_meta.parent_column_index[col_idx]].data(); chunk.encoding_kind = stripe_footer->columns[columns_level[col_idx].id].kind; chunk.type_kind = _metadata.per_file_metadata[stripe_source_mapping.source_idx] .ff.types[columns_level[col_idx].id] .kind; // num_child_rows for a struct column will be same, for other nested types it will be // calculated. chunk.num_child_rows = (chunk.type_kind != orc::STRUCT) ? 0 : chunk.num_rows; chunk.dtype_id = column_types[col_idx].id(); chunk.decimal_scale = _metadata.per_file_metadata[stripe_source_mapping.source_idx] .ff.types[columns_level[col_idx].id] .scale.value_or(0); chunk.rowgroup_id = rowgroup_id; chunk.dtype_len = (column_types[col_idx].id() == type_id::STRING) ? sizeof(string_index_pair) : ((column_types[col_idx].id() == type_id::LIST) or (column_types[col_idx].id() == type_id::STRUCT)) ? 
sizeof(size_type) : cudf::size_of(column_types[col_idx]); chunk.num_rowgroups = stripe_num_rowgroups; if (chunk.type_kind == orc::TIMESTAMP) { chunk.timestamp_type_id = _timestamp_type.id(); } if (not is_data_empty) { for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) { chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos; } } } stripe_start_row += num_rows_per_stripe; num_rowgroups += stripe_num_rowgroups; stripe_idx++; } } for (auto& task : read_tasks) { CUDF_EXPECTS(task.first.get() == task.second, "Unexpected discrepancy in bytes read."); } // Process dataset chunk pages into output columns if (stripe_data.size() != 0) { auto row_groups = cudf::detail::hostdevice_2dvector<gpu::RowGroup>(num_rowgroups, num_columns, stream); if (level > 0 and row_groups.size().first) { cudf::host_span<gpu::RowGroup> row_groups_span(row_groups.base_host_ptr(), num_rowgroups * num_columns); auto& rw_grp_meta = _col_meta.rwgrp_meta; // Update start row and num rows per row group std::transform(rw_grp_meta.begin(), rw_grp_meta.end(), row_groups_span.begin(), rw_grp_meta.begin(), [&](auto meta, auto& row_grp) { row_grp.num_rows = meta.num_rows; row_grp.start_row = meta.start_row; return meta; }); } // Setup row group descriptors if using indexes if (_metadata.per_file_metadata[0].ps.compression != orc::NONE and not is_data_empty) { auto decomp_data = decompress_stripe_data(chunks, stripe_data, _metadata.per_file_metadata[0].decompressor.get(), stream_info, total_num_stripes, row_groups, _metadata.get_row_index_stride(), level == 0, stream); stripe_data.clear(); stripe_data.push_back(std::move(decomp_data)); } else { if (row_groups.size().first) { chunks.host_to_device(stream); row_groups.host_to_device(stream); gpu::ParseRowGroupIndex(row_groups.base_device_ptr(), nullptr, chunks.base_device_ptr(), num_columns, total_num_stripes, num_rowgroups, _metadata.get_row_index_stride(), level == 0, stream); } } for (size_t i = 0; i < column_types.size(); ++i) { bool is_nullable = 
false; for (size_t j = 0; j < total_num_stripes; ++j) { if (chunks[j][i].strm_len[gpu::CI_PRESENT] != 0) { is_nullable = true; break; } } auto is_list_type = (column_types[i].id() == type_id::LIST); auto n_rows = (level == 0) ? num_rows : _col_meta.num_child_rows[i]; // For list column, offset column will be always size + 1 if (is_list_type) n_rows++; out_buffers[level].emplace_back(column_types[i], n_rows, is_nullable, stream, _mr); } if (not is_data_empty) { decode_stream_data(chunks, num_dict_entries, skip_rows, tz_table.view(), row_groups, _metadata.get_row_index_stride(), out_buffers[level], level, stream); } // Extract information to process nested child columns if (nested_col.size()) { if (not is_data_empty) { scan_null_counts(chunks, null_count_prefix_sums[level], stream); } row_groups.device_to_host(stream, true); aggregate_child_meta(chunks, row_groups, out_buffers[level], nested_col, level); } // ORC stores number of elements at each row, so we need to generate offsets from that if (nested_col.size()) { std::vector<list_buffer_data> buff_data; std::for_each( out_buffers[level].begin(), out_buffers[level].end(), [&buff_data](auto& out_buffer) { if (out_buffer.type.id() == type_id::LIST) { auto data = static_cast<size_type*>(out_buffer.data()); buff_data.emplace_back(list_buffer_data{data, out_buffer.size}); } }); if (buff_data.size()) { auto const dev_buff_data = cudf::detail::make_device_uvector_async(buff_data, stream); generate_offsets_for_list(dev_buff_data, stream); } } } } } // If out_columns is empty, then create columns from buffer. 
if (out_columns.empty()) { create_columns(std::move(out_buffers), out_columns, schema_info, stream); } // Return column names (must match order of returned columns) out_metadata.column_names.reserve(schema_info.size()); std::transform(schema_info.cbegin(), schema_info.cend(), std::back_inserter(out_metadata.column_names), [](auto info) { return info.name; }); out_metadata.schema_info = std::move(schema_info); for (const auto& meta : _metadata.per_file_metadata) { for (const auto& kv : meta.ff.metadata) { out_metadata.user_data.insert({kv.name, kv.value}); } } return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)}; } // Forward to implementation reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>>&& sources, orc_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { _impl = std::make_unique<impl>(std::move(sources), options, mr); } // Destructor within this translation unit reader::~reader() = default; // Forward to implementation table_with_metadata reader::read(orc_reader_options const& options, rmm::cuda_stream_view stream) { return _impl->read( options.get_skip_rows(), options.get_num_rows(), options.get_stripes(), stream); } } // namespace orc } // namespace detail } // namespace io } // namespace cudf
c376ceff4cf3d58aad111e3e326b669e7a36bd7f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "compute.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a_d = NULL; hipMalloc(&a_d, XSIZE*YSIZE); float *b_d = NULL; hipMalloc(&b_d, XSIZE*YSIZE); float *c_d = NULL; hipMalloc(&c_d, XSIZE*YSIZE); int arraySize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( compute), dim3(gridBlock),dim3(threadBlock), 0, 0, a_d,b_d,c_d,arraySize); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( compute), dim3(gridBlock),dim3(threadBlock), 0, 0, a_d,b_d,c_d,arraySize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( compute), dim3(gridBlock),dim3(threadBlock), 0, 0, a_d,b_d,c_d,arraySize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c376ceff4cf3d58aad111e3e326b669e7a36bd7f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "compute.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a_d = NULL; cudaMalloc(&a_d, XSIZE*YSIZE); float *b_d = NULL; cudaMalloc(&b_d, XSIZE*YSIZE); float *c_d = NULL; cudaMalloc(&c_d, XSIZE*YSIZE); int arraySize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); compute<<<gridBlock,threadBlock>>>(a_d,b_d,c_d,arraySize); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { compute<<<gridBlock,threadBlock>>>(a_d,b_d,c_d,arraySize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { compute<<<gridBlock,threadBlock>>>(a_d,b_d,c_d,arraySize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8cdf4b203ff361edfa32b11ebdfaf635a7a769fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <string> #include <fstream> #include <sstream> #include <vector> #include <iterator> #include <unordered_map> #include <bitset> #include <thread> #include <math.h> #define BITS 64 #define MAX_LEN_WORD 20 using namespace std; string readFile(string file) { ifstream MyFile; MyFile.open(file); stringstream strStream; strStream << MyFile.rdbuf(); return strStream.str(); } vector<string> strToVector(string data, char stop_w = '\n') { vector<string> out; size_t last_init = 0; for (size_t i = 0; i < data.length(); i++) { if (data[i] == stop_w) { string temp = data.substr(last_init, i - last_init); out.push_back(temp); last_init = i + 1; } } return out; } void printVector(vector<string> data) { for (size_t i = 0; i < data.size(); i++) { printf("%s\n", data[i].c_str()); } } void printVectorInt(vector<int> data) { for (size_t i = 0; i < data.size(); i++) { printf("%i ", data[i]); } printf("\n"); } void printMap(unordered_map <string, int> data) { for (unordered_map<string, int >::const_iterator it = data.begin(); it != data.end(); ++it) { cout << it->first << " " << it->second << "\n"; } } string* text_generator(vector<string> my_dict, int len_words) { string* text = new string[len_words]; for (size_t i = 0; i < len_words; i++) { text[i] = my_dict[rand() % my_dict.size()]; } return text; } unordered_map <string, int> get_frec(string* words, int len_words) { unordered_map <string, int> out; for (size_t i = 0; i < len_words; i++) { if (out.count(words[i]) > 0) { // cuantas veces parece la palabra en el texto out[words[i]] += 1; } else { out[words[i]] = 1; } } return out; } string sum_matrix(vector<long*> in_matrix) { string out; for (size_t j = 0; j < BITS; j++) { long int temp = 0; for (size_t i = 0; i < in_matrix.size(); i++) { temp += in_matrix[i][j]; } if (temp > 0) { out += '1'; } else { out += '0'; } 
} return out; } size_t count_words(unordered_map <string, int>* in_words, int n_text) { size_t count_l = 0; for (size_t i = 0; i < n_text; i++) { count_l += in_words[i].size(); } return count_l; } char* strToChar(string data) { char* out = new char[MAX_LEN_WORD]; for (size_t i = 0; i < MAX_LEN_WORD; i++) { if (i < data.size()) { out[i] = data[i]; } else { out[i] = 0; } } return out; } void compress_sim_data_cuda(unordered_map <string, int>* in_words, char * & s_out, int * & f_out , int n_text) { size_t numerate = 0; for (size_t i = 0; i < n_text; i++) { for (unordered_map<string, int >::const_iterator it = in_words[i].begin(); it != in_words[i].end(); ++it) { char* t_str = strToChar(it->first); for (size_t i = 0; i < MAX_LEN_WORD; i++) { s_out[numerate * MAX_LEN_WORD + i] = t_str[i]; } f_out[numerate] = it->second; numerate++; } } } string* extract_sim_data_cuda(long * v_words, unordered_map <string, int>* in_words, int n_text) { string* out = new string[n_text]; size_t numerate = 0; for (size_t i = 0; i < n_text; i++) { vector<long*> temp; for (unordered_map<string, int >::const_iterator it = in_words[i].begin(); it != in_words[i].end(); ++it) { long * t_bits = new long[BITS]; for (size_t b = 0; b < BITS; b++) { t_bits[b] = v_words[numerate + b]; } temp.push_back(t_bits); numerate+= BITS; } out[i] = sum_matrix(temp); } return out; } void sim_hash_lineal(char* s_in, int* f_in, long* & out, int len) { for (size_t t = 0; t < len; t++) { unsigned long long int hash = 5381; for (size_t i = 0; i < MAX_LEN_WORD; i++) { if (s_in[t * MAX_LEN_WORD + i] != 0) { hash = ((hash << 5) + hash) + (int)s_in[t * MAX_LEN_WORD + i]; } } bool* bits = new bool[BITS]; for (size_t i = 0; i < BITS; i++) { bits[i] = hash % 2; hash = hash / 2; } for (size_t i = 0; i < BITS; i++) { size_t p = (t * BITS) + i; out[p] = (int)bits[i]; if (out[p] == 1) { out[p] += f_in[t]; } else { out[p] -= f_in[t]; } } } } __global__ void cuda_sim_hash(char * s_in, int * f_in, long * out, int len) { int t = 
(blockIdx.x * blockDim.x) + (threadIdx.x); if (t >= 0 && t < len) { unsigned long long int hash = 5381; for (size_t i = 0; i < MAX_LEN_WORD; i++) { if (s_in[t * MAX_LEN_WORD + i] != 0) { hash = ((hash << 5) + hash) + (int)s_in[t * MAX_LEN_WORD + i]; } } bool* bits = new bool[BITS]; for (size_t i = 0; i < BITS; i++) { bits[i] = hash % 2; hash = hash / 2; } for (size_t i = 0; i < BITS; i++) { size_t p = (t * BITS) + i; out[p] = (int)bits[i]; if (out[p] == 1) { out[p] += f_in[t]; } else { out[p] -= f_in[t]; } } delete bits; } } bool compare_str(string * a, string * b, int len) { for (size_t i = 0; i < len; i++) { if (a[i] != b[i]) { return false; } } return true; } int main() { clock_t begin, end; double elapsed_secs; long long int w_s; //palabras por segundo srand(time(NULL)); string words = readFile("words.txt"); //leer palabras vector<string> l_words = strToVector(words); //libreria de palabras int long_text = 4000; //longitud de palabras por texto int n_text = 2560; //cantidad de textos (documentos) printf("Num. 
textos: %i, Long Text: %i \n", n_text, long_text); unordered_map <string, int>* words_frec = new unordered_map <string, int>[n_text]; // diccionario de frecuencias por palabra de cada documento for (size_t i = 0; i < n_text; i++) { words_frec[i] = get_frec(text_generator(l_words, long_text), long_text); } size_t amount_words = count_words(words_frec, n_text); printf("Total Words: %i words \n", amount_words); char * s_in = new char[amount_words * MAX_LEN_WORD]; int * f_in = new int[amount_words]; long * out = new long[amount_words * BITS]; compress_sim_data_cuda(words_frec, s_in, f_in , n_text); char* cu_s_in = 0; int* cu_f_in = 0; long* cu_out = 0; hipMalloc((void**)&cu_s_in, amount_words * sizeof(char) * MAX_LEN_WORD); hipMalloc((void**)&cu_f_in, amount_words * sizeof(int)); hipMalloc((void**)&cu_out, amount_words * sizeof(long) * BITS); begin = clock(); hipMemcpy(cu_s_in, s_in, amount_words * sizeof(char) * MAX_LEN_WORD, hipMemcpyHostToDevice); hipMemcpy(cu_f_in, f_in, amount_words * sizeof(int), hipMemcpyHostToDevice); int thr = 1024; int dim_grid = (amount_words/thr)+1; hipLaunchKernelGGL(( cuda_sim_hash) , dim3(dim_grid), dim3(thr) , 0, 0, cu_s_in, cu_f_in, cu_out, amount_words); hipMemcpy(out, cu_out, amount_words * sizeof(long) * BITS, hipMemcpyDeviceToHost); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Grilla: %d, Bloque: %d \n", dim_grid, thr); printf("Tiempo Cuda: %f ms \n", elapsed_secs); w_s = amount_words / elapsed_secs; printf("palabras por Seg: %d words \n", w_s); string* r_out = extract_sim_data_cuda(out, words_frec, n_text); long* out_l = new long[amount_words * BITS]; begin = clock(); sim_hash_lineal(s_in, f_in, out_l, amount_words); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Tiempo Lineal: %f ms \n", elapsed_secs); w_s = amount_words / elapsed_secs; printf("palabras por Seg: %d words \n", w_s); string* r_out_l = extract_sim_data_cuda(out_l, words_frec, n_text); /* for (size_t i = 0; 
i < n_text; i++) { cout << r_out_l[i] << endl; }*/ if (compare_str(r_out, r_out_l, n_text)) { cout << "Ok" << endl; } else { cout << "Error" << endl; } }
8cdf4b203ff361edfa32b11ebdfaf635a7a769fe.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <string> #include <fstream> #include <sstream> #include <vector> #include <iterator> #include <unordered_map> #include <bitset> #include <thread> #include <math.h> #define BITS 64 #define MAX_LEN_WORD 20 using namespace std; string readFile(string file) { ifstream MyFile; MyFile.open(file); stringstream strStream; strStream << MyFile.rdbuf(); return strStream.str(); } vector<string> strToVector(string data, char stop_w = '\n') { vector<string> out; size_t last_init = 0; for (size_t i = 0; i < data.length(); i++) { if (data[i] == stop_w) { string temp = data.substr(last_init, i - last_init); out.push_back(temp); last_init = i + 1; } } return out; } void printVector(vector<string> data) { for (size_t i = 0; i < data.size(); i++) { printf("%s\n", data[i].c_str()); } } void printVectorInt(vector<int> data) { for (size_t i = 0; i < data.size(); i++) { printf("%i ", data[i]); } printf("\n"); } void printMap(unordered_map <string, int> data) { for (unordered_map<string, int >::const_iterator it = data.begin(); it != data.end(); ++it) { cout << it->first << " " << it->second << "\n"; } } string* text_generator(vector<string> my_dict, int len_words) { string* text = new string[len_words]; for (size_t i = 0; i < len_words; i++) { text[i] = my_dict[rand() % my_dict.size()]; } return text; } unordered_map <string, int> get_frec(string* words, int len_words) { unordered_map <string, int> out; for (size_t i = 0; i < len_words; i++) { if (out.count(words[i]) > 0) { // cuantas veces parece la palabra en el texto out[words[i]] += 1; } else { out[words[i]] = 1; } } return out; } string sum_matrix(vector<long*> in_matrix) { string out; for (size_t j = 0; j < BITS; j++) { long int temp = 0; for (size_t i = 0; i < in_matrix.size(); i++) { temp += in_matrix[i][j]; } if (temp > 0) { out += '1'; } else { out += '0'; } } return out; } size_t count_words(unordered_map <string, 
int>* in_words, int n_text) { size_t count_l = 0; for (size_t i = 0; i < n_text; i++) { count_l += in_words[i].size(); } return count_l; } char* strToChar(string data) { char* out = new char[MAX_LEN_WORD]; for (size_t i = 0; i < MAX_LEN_WORD; i++) { if (i < data.size()) { out[i] = data[i]; } else { out[i] = 0; } } return out; } void compress_sim_data_cuda(unordered_map <string, int>* in_words, char * & s_out, int * & f_out , int n_text) { size_t numerate = 0; for (size_t i = 0; i < n_text; i++) { for (unordered_map<string, int >::const_iterator it = in_words[i].begin(); it != in_words[i].end(); ++it) { char* t_str = strToChar(it->first); for (size_t i = 0; i < MAX_LEN_WORD; i++) { s_out[numerate * MAX_LEN_WORD + i] = t_str[i]; } f_out[numerate] = it->second; numerate++; } } } string* extract_sim_data_cuda(long * v_words, unordered_map <string, int>* in_words, int n_text) { string* out = new string[n_text]; size_t numerate = 0; for (size_t i = 0; i < n_text; i++) { vector<long*> temp; for (unordered_map<string, int >::const_iterator it = in_words[i].begin(); it != in_words[i].end(); ++it) { long * t_bits = new long[BITS]; for (size_t b = 0; b < BITS; b++) { t_bits[b] = v_words[numerate + b]; } temp.push_back(t_bits); numerate+= BITS; } out[i] = sum_matrix(temp); } return out; } void sim_hash_lineal(char* s_in, int* f_in, long* & out, int len) { for (size_t t = 0; t < len; t++) { unsigned long long int hash = 5381; for (size_t i = 0; i < MAX_LEN_WORD; i++) { if (s_in[t * MAX_LEN_WORD + i] != 0) { hash = ((hash << 5) + hash) + (int)s_in[t * MAX_LEN_WORD + i]; } } bool* bits = new bool[BITS]; for (size_t i = 0; i < BITS; i++) { bits[i] = hash % 2; hash = hash / 2; } for (size_t i = 0; i < BITS; i++) { size_t p = (t * BITS) + i; out[p] = (int)bits[i]; if (out[p] == 1) { out[p] += f_in[t]; } else { out[p] -= f_in[t]; } } } } __global__ void cuda_sim_hash(char * s_in, int * f_in, long * out, int len) { int t = (blockIdx.x * blockDim.x) + (threadIdx.x); if (t >= 0 && t < 
len) { unsigned long long int hash = 5381; for (size_t i = 0; i < MAX_LEN_WORD; i++) { if (s_in[t * MAX_LEN_WORD + i] != 0) { hash = ((hash << 5) + hash) + (int)s_in[t * MAX_LEN_WORD + i]; } } bool* bits = new bool[BITS]; for (size_t i = 0; i < BITS; i++) { bits[i] = hash % 2; hash = hash / 2; } for (size_t i = 0; i < BITS; i++) { size_t p = (t * BITS) + i; out[p] = (int)bits[i]; if (out[p] == 1) { out[p] += f_in[t]; } else { out[p] -= f_in[t]; } } delete bits; } } bool compare_str(string * a, string * b, int len) { for (size_t i = 0; i < len; i++) { if (a[i] != b[i]) { return false; } } return true; } int main() { clock_t begin, end; double elapsed_secs; long long int w_s; //palabras por segundo srand(time(NULL)); string words = readFile("words.txt"); //leer palabras vector<string> l_words = strToVector(words); //libreria de palabras int long_text = 4000; //longitud de palabras por texto int n_text = 2560; //cantidad de textos (documentos) printf("Num. textos: %i, Long Text: %i \n", n_text, long_text); unordered_map <string, int>* words_frec = new unordered_map <string, int>[n_text]; // diccionario de frecuencias por palabra de cada documento for (size_t i = 0; i < n_text; i++) { words_frec[i] = get_frec(text_generator(l_words, long_text), long_text); } size_t amount_words = count_words(words_frec, n_text); printf("Total Words: %i words \n", amount_words); char * s_in = new char[amount_words * MAX_LEN_WORD]; int * f_in = new int[amount_words]; long * out = new long[amount_words * BITS]; compress_sim_data_cuda(words_frec, s_in, f_in , n_text); char* cu_s_in = 0; int* cu_f_in = 0; long* cu_out = 0; cudaMalloc((void**)&cu_s_in, amount_words * sizeof(char) * MAX_LEN_WORD); cudaMalloc((void**)&cu_f_in, amount_words * sizeof(int)); cudaMalloc((void**)&cu_out, amount_words * sizeof(long) * BITS); begin = clock(); cudaMemcpy(cu_s_in, s_in, amount_words * sizeof(char) * MAX_LEN_WORD, cudaMemcpyHostToDevice); cudaMemcpy(cu_f_in, f_in, amount_words * sizeof(int), 
cudaMemcpyHostToDevice); int thr = 1024; int dim_grid = (amount_words/thr)+1; cuda_sim_hash <<< dim_grid, thr >>> (cu_s_in, cu_f_in, cu_out, amount_words); cudaMemcpy(out, cu_out, amount_words * sizeof(long) * BITS, cudaMemcpyDeviceToHost); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Grilla: %d, Bloque: %d \n", dim_grid, thr); printf("Tiempo Cuda: %f ms \n", elapsed_secs); w_s = amount_words / elapsed_secs; printf("palabras por Seg: %d words \n", w_s); string* r_out = extract_sim_data_cuda(out, words_frec, n_text); long* out_l = new long[amount_words * BITS]; begin = clock(); sim_hash_lineal(s_in, f_in, out_l, amount_words); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Tiempo Lineal: %f ms \n", elapsed_secs); w_s = amount_words / elapsed_secs; printf("palabras por Seg: %d words \n", w_s); string* r_out_l = extract_sim_data_cuda(out_l, words_frec, n_text); /* for (size_t i = 0; i < n_text; i++) { cout << r_out_l[i] << endl; }*/ if (compare_str(r_out, r_out_l, n_text)) { cout << "Ok" << endl; } else { cout << "Error" << endl; } }
2888b49b9e3232ce5c7ee5e86e47d35378c4ccee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include <torch/torch.h> #include <iostream> __global__ void box_iou_cuda_kernel(float *box_iou, float4 *box1, float4 *box2, long M, long N, int idxJump) { int idx = blockIdx.x*blockDim.x + threadIdx.x; size_t b1_idx, b2_idx, b1_row_offset, b2_row_offset; float xmin1, xmin2, xmax1, xmax2, ymin1, ymin2, ymax1, ymax2; float x_tl, y_tl, x_br, y_br, w, h, inter, area1, area2, iou; for (long i = idx; i < M * N; i += idxJump){ b1_idx = i / N; b2_idx = i % N; b1_row_offset = b1_idx; b2_row_offset = b2_idx; xmin1 = box1[b1_row_offset].x; ymin1 = box1[b1_row_offset].y; xmax1 = box1[b1_row_offset].z; ymax1 = box1[b1_row_offset].w; xmin2 = box2[b2_row_offset].x; ymin2 = box2[b2_row_offset].y; xmax2 = box2[b2_row_offset].z; ymax2 = box2[b2_row_offset].w; x_tl = fmaxf(xmin1, xmin2); y_tl = fmaxf(ymin1, ymin2); x_br = fminf(xmax1, xmax2); y_br = fminf(ymax1, ymax2); w = (x_br - x_tl + 1 - 1) < 0 ? 0.0f : (x_br - x_tl + 1 - 1); h = (y_br - y_tl + 1 - 1) < 0 ? 
0.0f : (y_br - y_tl + 1 - 1); inter = w * h; area1 = (xmax1 - xmin1 + 1 - 1) * (ymax1 - ymin1 + 1 - 1); area2 = (xmax2 - xmin2 + 1 - 1) * (ymax2 - ymin2 + 1 - 1); iou = inter / (area1 + area2 - inter); box_iou[b1_idx * N + b2_idx] = iou; } } at::Tensor box_iou_cuda(at::Tensor box1, at::Tensor box2){ int minGridSize; int blockSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*) box_iou_cuda_kernel, 0, // dynamic memory 0); // maximum utilized threads long M = box1.size(0); long N = box2.size(0); auto box_iou = torch::ones({M, N}, torch::CUDA(at::kFloat)); dim3 gridDim(minGridSize); dim3 blockDim(blockSize); int idxJump = minGridSize * blockSize; auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( box_iou_cuda_kernel), dim3(gridDim), dim3(blockDim), 0, stream.stream(), box_iou.data<float>(), (float4*) box1.data<float>(), (float4*) box2.data<float>(), M, N, idxJump); return box_iou; }
2888b49b9e3232ce5c7ee5e86e47d35378c4ccee.cu
/** * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include <torch/torch.h> #include <iostream> __global__ void box_iou_cuda_kernel(float *box_iou, float4 *box1, float4 *box2, long M, long N, int idxJump) { int idx = blockIdx.x*blockDim.x + threadIdx.x; size_t b1_idx, b2_idx, b1_row_offset, b2_row_offset; float xmin1, xmin2, xmax1, xmax2, ymin1, ymin2, ymax1, ymax2; float x_tl, y_tl, x_br, y_br, w, h, inter, area1, area2, iou; for (long i = idx; i < M * N; i += idxJump){ b1_idx = i / N; b2_idx = i % N; b1_row_offset = b1_idx; b2_row_offset = b2_idx; xmin1 = box1[b1_row_offset].x; ymin1 = box1[b1_row_offset].y; xmax1 = box1[b1_row_offset].z; ymax1 = box1[b1_row_offset].w; xmin2 = box2[b2_row_offset].x; ymin2 = box2[b2_row_offset].y; xmax2 = box2[b2_row_offset].z; ymax2 = box2[b2_row_offset].w; x_tl = fmaxf(xmin1, xmin2); y_tl = fmaxf(ymin1, ymin2); x_br = fminf(xmax1, xmax2); y_br = fminf(ymax1, ymax2); w = (x_br - x_tl + 1 - 1) < 0 ? 0.0f : (x_br - x_tl + 1 - 1); h = (y_br - y_tl + 1 - 1) < 0 ? 
0.0f : (y_br - y_tl + 1 - 1); inter = w * h; area1 = (xmax1 - xmin1 + 1 - 1) * (ymax1 - ymin1 + 1 - 1); area2 = (xmax2 - xmin2 + 1 - 1) * (ymax2 - ymin2 + 1 - 1); iou = inter / (area1 + area2 - inter); box_iou[b1_idx * N + b2_idx] = iou; } } at::Tensor box_iou_cuda(at::Tensor box1, at::Tensor box2){ int minGridSize; int blockSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*) box_iou_cuda_kernel, 0, // dynamic memory 0); // maximum utilized threads long M = box1.size(0); long N = box2.size(0); auto box_iou = torch::ones({M, N}, torch::CUDA(at::kFloat)); dim3 gridDim(minGridSize); dim3 blockDim(blockSize); int idxJump = minGridSize * blockSize; auto stream = at::cuda::getCurrentCUDAStream(); box_iou_cuda_kernel<<<gridDim, blockDim, 0, stream.stream()>>>(box_iou.data<float>(), (float4*) box1.data<float>(), (float4*) box2.data<float>(), M, N, idxJump); return box_iou; }
36e78980722bc3cf74813069f120e320c990e434.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> #define BYDIMF 2 #define CDIM 5 #define BYDIMB 5 #if __CUDA_ARCH__ >= 300 /* * Positive kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) { const int nwindow = 2*SKIP+1; int iwords[nwindow]; float aa[NREPS]; float daa[NREPS]; float bb[NREPS][nwindow]; float dbb[NREPS][nwindow]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, indx, icol, dxy, lb, ub; float prod, v, ascale, bscale; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; bool good; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); float inr = 1.0f / nrows; #pragma unroll for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers if (istart + i - SKIP - 1 >= 0) { iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word address } else { iwords[i] = -1; } good = (iwords[i] >= 0); #pragma unroll for (j = 0; j < NREPS; j++) { // Get the B vector for this word indx = tid + j * dxy; if (good && indx < nrows) { bb[j][i] = B[indx + iwords[i]]; } else { bb[j][i] = 0; } dbb[j][i] = 0; } } for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < nwindow-1; i++) { // slide iwords down iwords[i] = iwords[i+1]; #pragma unroll for (j = 0; j < NREPS; j++) { bb[j][i] = bb[j][i+1]; // slide data down dbb[j][i] = dbb[j][i+1]; // slide deriv down } } good = (icol + SKIP < ncols); if (good) { iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word address } else { iwords[nwindow - 1] = -1; } good = good && iwords[nwindow-1] >= 0; #pragma unroll for (j = 0; j < NREPS; 
j++) { // Get a new B column indx = tid + j * dxy; if (good && indx < nrows) { bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]]; } else { bb[j][nwindow - 1] = 0; } dbb[j][nwindow-1] = 0; if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column aa[j] = A[indx + iwords[SKIP]]; } else { aa[j] = 0; } } lb = LB[icol]; ub = UB[icol]; __syncthreads(); if (iwords[SKIP] >= 0) { #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols prod = 0; if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols } #pragma unroll for (k = 1; k < 32; k = k + k) { v = __shfl_down(prod, k); // Reduce within warp prod += v; } if (threadIdx.x == 0) { CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce across warps for (k = tid; k <= ub - lb; k += dxy) { CC[k] += CC[k + i * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i <= ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = 1.0f - v; // All pairs have label 1 } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { daa[j] = 0; } ascale = pow(max(0, iwords[SKIP])*inr + 1.0f, vexp); #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP && iwords[i] >= 0) { bscale = pow(max(0, iwords[i])*inr + 1.0f, vexp); v = lrate * CC[i - SKIP - lb]; #pragma unroll for (j = 0; j < NREPS; j++) { daa[j] += ascale * v * bb[j][i]; // Update A's derivative dbb[j][i] += bscale * v * aa[j]; // Update B's derivative } } } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { if (tid + j * dxy < nrows) { // Save the A column atomicAdd(&A[tid + j * dxy + iwords[SKIP]], 
daa[j]); atomicAdd(&B[tid + j * dxy + iwords[0]], dbb[j][0]); } } } __syncthreads(); } #pragma unroll for (i = 1; i < nwindow; i++) { // Clear out the derivative queue if (iwords[i] >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Save the B column if (tid + j * dxy < nrows) { atomicAdd(&B[tid + j * dxy + iwords[i]], dbb[j][i]); } } } } } /* * Convolutional kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) { const int nwindow = 2*SKIP+1; int iwords[nwindow]; float aa[NREPS]; float bb[NREPS][nwindow]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, indx, icol, dxy, lb, ub; float prod, v; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; bool good; double sum = 0; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); #pragma unroll for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers if (istart + i - SKIP - 1 >= 0) { iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word } else { iwords[i] = -1; } good = (iwords[i] >= 0); #pragma unroll for (j = 0; j < NREPS; j++) { // Get the B vector for this word indx = tid + j * dxy; if (good && indx < nrows) { bb[j][i] = B[indx + iwords[i]]; } else { bb[j][i] = 0; } } } for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < nwindow-1; i++) { // slide iwords down iwords[i] = iwords[i+1]; #pragma unroll for (j = 0; j < NREPS; j++) { bb[j][i] = bb[j][i+1]; // slide data down } } good = (icol + SKIP < ncols); if (good) { iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word } else { iwords[nwindow - 1] = -1; } good = good && iwords[nwindow-1] >= 0; #pragma unroll for (j = 0; j < NREPS; j++) { // Get a new B column indx = tid + j 
* dxy; if (good && indx < nrows) { bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]]; } else { bb[j][nwindow - 1] = 0; } if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column aa[j] = A[indx + iwords[SKIP]]; } else { aa[j] = 0; } } lb = LB[icol]; ub = UB[icol]; __syncthreads(); #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols if (i >= SKIP + lb && i <= SKIP + ub) { if (i == SKIP || iwords[SKIP] < 0 || iwords[i] < 0) { // Give this word a large score (gives zero contribution to loss) prod = 20.0f; } else { prod = 0; #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols } #pragma unroll for (k = 1; k < 32; k = k + k) { v = __shfl_down(prod, k); // Reduce within warp prod += v; } } if (threadIdx.x == 0) { CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce across warps for (k = tid; k <= ub - lb; k += dxy) { CC[k] += CC[k + i * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i <= ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = log(max(v, 1.0e-20f)); // Compute the loss } __syncthreads(); for (i = 1; i <= ub - lb; i = i + i) { if ((tid & (i-1)) == 0 && tid + i <= ub - lb) { CC[tid] += CC[tid + i]; } __syncthreads(); } sum += CC[0]; __syncthreads(); } if (tid == 0) { atomicAdd(&Retval[0], (float)sum); } } template<int NSKIP, int BYDIM> __global__ void __word2vecPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) { __shared__ float CC[NSKIP*2*BYDIM]; float aa; int ib[NSKIP*2]; float prods[NSKIP*2]; float bscale[NSKIP*2]; int ia, iword, lb, ub; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * 
ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol, jcol; float bb, db, dv, v, ascale, tmp; float inr = 1.0f / nrows; for (icol = istart; icol < iend; icol++) { // Iterate over columns ia = nrows * W[icol]; if (ia >= 0) { // Load lb and ub values lb = LB[icol]; ub = UB[icol]; jcol = threadIdx.x - NSKIP; iword = -1; if (jcol >= lb && jcol <= ub) { // Load words in the window iword = W[icol + jcol]; } #pragma unroll for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods ib[i] = nrows * __shfl(iword, i); ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1); prods[i] = 0; prods[i+NSKIP] = 0; } for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words aa = A[i + ia]; #pragma unroll for (j = 0; j < NSKIP*2; j++) { if (ib[j] >= 0) { bb = B[i + ib[j]]; prods[j] += aa * bb; } } } #pragma unroll for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp #pragma unroll for (k = 1; k < 32; k = k+k) { tmp = __shfl_down(prods[j], k); prods[j] += tmp; } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (j = 0; j < 2*NSKIP; j++) { CC[j + NSKIP * 2 * threadIdx.y] = prods[j]; } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps __syncthreads(); for (j = tid; j < NSKIP * 2; j += dxy) { CC[j] += CC[j + i * NSKIP * 2]; } } __syncthreads(); for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = lrate * (1 - v); // All these pairs have label 1 } __syncthreads(); // Now do scaled gradients ascale = pow(max(0, ia)*inr + 1.0f, vexp); // Simulated ADAGRAD on A for (j = 0; j < NSKIP * 2; j++) { // Load B data if (ib[j] >= 0) { bscale[j] = pow(max(0, ib[j])*inr + 1.0f, vexp); // Simulated ADAGRAD on B } else { bscale[j] = 0; } prods[j] 
= CC[j]; } __syncthreads(); dv = 0; for (i = tid; i < nrows; i += dxy) { // Update vecs with derivatives aa = A[i + ia]; #pragma unroll for (j = 0; j < NSKIP * 2; j++) { // Load B data if (ib[j] >= 0) { bb = B[i + ib[j]]; dv += ascale * prods[j] * bb; db = bscale[j] * prods[j] * aa; atomicAdd(&B[i + ib[j]], db); // Update B } } atomicAdd(&A[i + ia], dv); // Update A } __syncthreads(); } } } template<int NSKIP, int BYDIM> __global__ void __word2vecEvalPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *retval) { __shared__ float CC[NSKIP*2*BYDIM]; float aa; float prods[NSKIP*2]; int ia, iword, lb, ub; int ib[NSKIP*2]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol, jcol; float bb, v, tmp, sum; sum = 0; for (icol = istart; icol < iend; icol++) { // Iterate over columns ia = nrows * W[icol]; if (ia >= 0) { // Load lb and ub values lb = LB[icol]; ub = UB[icol]; jcol = threadIdx.x - NSKIP; iword = -1; if (jcol >= lb && jcol <= ub) { // Load words in the window iword = W[icol + jcol]; } #pragma unroll for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods ib[i] = nrows * __shfl(iword, i); ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1); prods[i] = 0; prods[i+NSKIP] = 0; } for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words aa = A[i + ia]; #pragma unroll for (j = 0; j < NSKIP*2; j++) { if (ib[j] >= 0) { bb = B[i + ib[j]]; prods[j] += aa * bb; } } } #pragma unroll for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp #pragma unroll for (k = 1; k < 32; k = k+k) { tmp = __shfl_down(prods[j], k); prods[j] += tmp; } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (j = 0; j < 2*NSKIP; j++) { CC[j + NSKIP * 2 * threadIdx.y] = prods[j]; } } 
__syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps __syncthreads(); for (j = tid; j < NSKIP * 2; j += dxy) { CC[j] += CC[j + i * NSKIP * 2]; } } __syncthreads(); for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = log(max(v, 1.0e-20f)); // All these pairs have label 1 } __syncthreads(); // Now sum likelihood over window for (i = 1; i < 2 * NSKIP; i = i + i) { if ((tid & (i-1)) == 0 && tid + i < 2 * NSKIP) { CC[tid] += CC[tid + i]; } __syncthreads(); } sum += CC[0]; __syncthreads(); } } if (tid == 0) { atomicAdd(&retval[0], (float)sum); } } /* * Combined forward-backward word2vec kernel */ template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BYDIM]; float aa[NWA]; float bb[NWB]; float prods[NWA][NWB]; int ia[NWA]; int ib[NWB]; float bscale[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol; float dv, v, ascale; float inr = 1.0f / nrows; for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B bb[j] = B[i + ib[j]]; } #pragma unroll for (j = 0; j < NWA; j++) { // Compute the products of these 
elements v = A[i + ia[j]]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += v * bb[k]; } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = - lrate * v; // All these pairs have label 0 } __syncthreads(); for (i = tid; i < nrows; i += dxy) { #pragma unroll for (j = 0; j < NWA; j++) { // Load A data aa[j] = A[i + ia[j]]; } #pragma unroll for (k = 0; k < NWB; k++) { // Load B data bb[k] = B[i + ib[k]]; bscale[k] = pow(max(0, ib[k])*inr + 1.0f, vexp); prods[0][k] = 0; } #pragma unroll for (j = 0; j < NWA; j++) { // Now do the products ascale = pow(max(0, ia[j])*inr + 1.0f, vexp); dv = 0; #pragma unroll for (k = 0; k < NWB; k++) { v = CC[j + k * NWA]; dv += ascale * v * bb[k]; prods[0][k] += bscale[k] * v * aa[j]; } atomicAdd(&A[i + ia[j]], dv); // Update A } #pragma unroll for (k = 0; k < NWB; k++) { atomicAdd(&B[i + ib[k]], prods[0][k]); // Update B } } __syncthreads(); } } /* * Combined forward-backward word2vec kernel */ template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) { const int NWAB = NWA*NWB; __shared__ float 
CC[NWA*NWB*BYDIM]; float bb[NWB]; float prods[NWA][NWB]; int ia[NWA]; int ib[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol; float v; double sum = 0; for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B bb[j] = B[i + ib[j]]; } #pragma unroll for (j = 0; j < NWA; j++) { // Compute the products of these elements v = A[i + ia[j]]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += v * bb[k]; } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = log(max(1.0f - v, 1.0e-20f)); // All these pairs have label 
0 } for (i = 1; i < NWA*NWB; i = i + i) { if ((tid & (i-1)) == 0 && tid + i < NWA*NWB) { CC[tid] += CC[tid + i]; } __syncthreads(); } sum += CC[0]; __syncthreads(); } if (tid == 0) { atomicAdd(&Retval[0], (float)sum); } } /* * Convolutional kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos_exp(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) { const int nwindow = 2*SKIP+1; float aa[NREPS]; float da[NREPS]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, icol, dxy, lb, ub, iword, cword; float bb, db, prod, v; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns iword = nrows * W[icol]; // Get the current word __syncthreads(); lb = LB[icol]; ub = UB[icol]; if (iword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get A aa[j] = A[tid + j * dxy + iword]; } else { aa[j] = 0; } } for (i = lb; i <= ub; i++) { // Iterate across the window for A cols __syncthreads(); cword = nrows * W[icol + i]; // Get the current word prod = 0; if (cword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get B col bb = B[tid + j * dxy + cword]; prod += aa[j] * bb; // Compute the product between current A, B cols } } #pragma unroll for (k = 1; k < 32; k = k + k) { prod += __shfl_down(prod, k); // Reduce within warp } } if (threadIdx.x == 0) { CC[i - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } __syncthreads(); for (j = 1; j < blockDim.y; j++) { // Reduce across warps for (i = tid; i < ub - lb; i += dxy) { CC[i] += CC[i + j * nwindow]; } __syncthreads(); } 
__syncthreads(); // Apply the sigmoid map for (i = tid; i < ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = lrate * (1.0f - v); // All pairs have label 1 } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { da[j] = 0; } for (i = lb; i <= ub; i++) { // Iterate across the window for A cols cword = nrows * W[icol + i]; // Get the context word v = CC[i - lb]; if (cword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get B col bb = B[tid + j * dxy + cword]; da[j] += v * bb; db = v * aa[j]; atomicAdd(&B[tid + j * dxy + cword], db); } } } } #pragma unroll for (j = 0; j < NREPS; j++) { if (tid + j * dxy < nrows) { atomicAdd(&A[tid + j * dxy + iword], da[j]); } } } } } /* * Combined forward-backward word2vec kernel */ template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecNeg_old(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BYDIM]; float dd[MAXD]; float prods[NWA][NWB]; float aa, v, sum; int ia[NWA]; int ib[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int i, j, k, icol; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B if (ib[j] >= 0) { dd[j] = B[i + ib[j]]; } else { dd[j] = 0; } } #pragma unroll for (j = 0; j < NWA; 
j++) { // Compute the inner products of these elements if (ia[j] >= 0) { aa = A[i + ia[j]]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += aa * dd[k]; } } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = - lrate * v; // All these pairs have label 0 } __syncthreads(); for (i = tid; i < nrows; i += dxy) { #pragma unroll for (j = 0; j < NWB; j++) { // Load B data if (ib[j] >= 0) { dd[j] = B[i + ib[j]]; } else { dd[j] = 0; } } #pragma unroll for (j = 0; j < NWA; j++) { // Now do the product if (ia[j] >= 0) { sum = 0; #pragma unroll for (k = 0; k < NWB; k++) { float xx = CC[j + k * NWA]; sum += xx * dd[k]; } atomicAdd(&A[i + ia[j]], sum); } } #pragma unroll for (j = 0; j < NWA; j++) { // Load A data if (ia[j] >= 0) { dd[j] = A[i + ia[j]]; } else { dd[j] = 0; } } #pragma unroll for (j = 0; j < NWB; j++) { // Now do the product if (ib[j] >= 0) { sum = 0; #pragma unroll for (k = 0; k < NWA; k++) { float xx = CC[k + j * NWA]; sum += xx * dd[k]; } atomicAdd(&B[i + ib[j]], sum); } } } __syncthreads(); } } /* * * Simple forward kernel for word2vec. Computes inner products of columns from A with columns from B. 
* The column indices are specified by two "word" matrices. The inner products are computed as an outer product * of the word matrices. * * NWA is the number of words per column in WA * NWB is the number of words per column in WB * * Columns of the output matrix C are <window> = NWA*NWB long, and contain inner products with corresponding columns of B. * */ template<int NWA, int NWB, int BDIM> __global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BDIM]; float aa; float bb[NWB]; float prods[NWA][NWB]; int wa[NWA]; int wb[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int i, j, k, icol; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { wa[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { wb[i] = WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B bb[j] = B[i + wb[j] * nrows]; } #pragma unroll for (j = 0; j < NWA; j++) { // Computes the products of these elements aa = A[i + wa[j] * nrows]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += aa * bb[k]; } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j 
= 0; j < NWB; j++) { CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); #pragma unroll for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWAB; i += dxy) { // Save to main memory C[i + icol * NWAB] = CC[i]; //atomicAdd(&C[i + icol * NWAB], CC[i]); } __syncthreads(); } } /* * * Simple backward kernel for word2vec. * Computes the gradient for A given B or vice-versa, and does an SGD update. * * NWA is the number of words per column in WA * NWB is the number of words per column in WB * */ template<int NWA, int NWB, int MAXDIM> __global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) { const int NWAB = NWA * NWB; float dd[MAXDIM]; int wa[NWA]; int wb[NWB]; __shared__ float cc[NWA*NWB]; int tid = threadIdx.x; int fid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int icol, i, j, k; float sum; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // iterate in columns #pragma unroll for (j = 0; j < NWA; j++) { wa[j] = WA[j + icol * NWA]; // Load the A word matrix } __syncthreads(); #pragma unroll for (j = 0; j < NWB; j++) { wb[j] = WB[j + icol * NWB]; // Load the B word matrix } for (i = fid; i < NWAB; i += dxy) { cc[i] = C[i + icol * NWAB]; } __syncthreads(); for (i = tid; i < nrows; i += dxy) { #pragma unroll for (j = 0; j < NWB; j++) { // Load the data dd[j] = B[i + wb[j] * nrows]; } #pragma unroll for (j = 0; j < NWA; j++) { // Now do the product sum = 0; #pragma unroll for (k = 0; k < NWB; k++) { float xx = cc[j + k * NWA]; sum += xx * dd[k]; } atomicAdd(&A[i + wa[j] * nrows], sum * lrate); } #pragma unroll for (j = 0; j < NWA; j++) { // Load the data dd[j] = A[i + wa[j] * nrows]; } #pragma unroll for (j = 0; j < 
NWB; j++) { // Now do the product sum = 0; #pragma unroll for (k = 0; k < NWA; k++) { float xx = cc[k + j * NWA]; sum += xx * dd[k]; } atomicAdd(&B[i + wb[j] * nrows], sum * lrate); } } } } #else template<int SKIP, int BYDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {} template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {} template<int SKIP, int BYDIM, int NREPS> __global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {} template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {} template<int NWA, int NWB, int BDIM> __global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {} template<int NWA, int NWB, int MAXDIM> __global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {} #endif int word2vecPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) { dim3 threads(32, CDIM, 1); int nblocks = min(64, ncols); switch(skip) { case 5 :hipLaunchKernelGGL(( __word2vecPos<5, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; case 3 :hipLaunchKernelGGL(( __word2vecPos<3, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; case 2 :hipLaunchKernelGGL(( __word2vecPos<2, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; default : printf("word2vecPos unsupport size %d\n", skip); return 1; } // hipDeviceSynchronize(); int err = hipGetLastError(); return err; } int word2vecNeg(int nrows, int ncols, int nwa, int nwb, int 
*WA, int *WB, float *A, float *B, float lrate, float vexp) { dim3 threads(32, BYDIMF, 1); int nblocks = min(2048, 2 + (ncols - 1)); int which = nwa*10000 + nwb; switch (which) { case 50001:hipLaunchKernelGGL(( __word2vecNeg<5,1,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 50005:hipLaunchKernelGGL(( __word2vecNeg<5,5,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 100005:hipLaunchKernelGGL(( __word2vecNeg<10,5,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 50010:hipLaunchKernelGGL(( __word2vecNeg<5,10,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break; // case 150010: __word2vecNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate); break; default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1; } // hipDeviceSynchronize(); int err = hipGetLastError(); return err; } int word2vecEvalPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float *Retval) { dim3 threads(32, CDIM, 1); int nblocks = min(64, ncols); switch(skip) { case 5 :hipLaunchKernelGGL(( __word2vecEvalPos<5, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, Retval); break; case 3 :hipLaunchKernelGGL(( __word2vecEvalPos<3, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, Retval); break; case 2 :hipLaunchKernelGGL(( __word2vecEvalPos<2, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, Retval); break; default : printf("word2vecEvalPos unsupport size %d\n", skip); return 1; } hipDeviceSynchronize(); int err = hipGetLastError(); return err; } int word2vecEvalNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *Retval) { dim3 threads(32, BYDIMF, 1); int nblocks = min(2048, 2 + (ncols - 1)); int which 
= nwa*10000 + nwb; switch (which) { case 50001:hipLaunchKernelGGL(( __word2vecEvalNeg<5,1,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break; case 50005:hipLaunchKernelGGL(( __word2vecEvalNeg<5,5,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break; case 100005:hipLaunchKernelGGL(( __word2vecEvalNeg<10,5,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break; case 50010:hipLaunchKernelGGL(( __word2vecEvalNeg<5,10,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break; // case 150010: __word2vecEvalNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; default : printf("word2vecEvalNeg unsupport size combination %d %d\n", nwa, nwb); return 1; } hipDeviceSynchronize(); int err = hipGetLastError(); return err; } int word2vecFwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C) { dim3 threads(32, BYDIMF, 1); int nblocks = min(4096, 2 + (ncols - 1)); int which = nwa*10000 + nwb; switch (which) { case 50001:hipLaunchKernelGGL(( __word2vecFwd<5,1,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C); break; case 50005:hipLaunchKernelGGL(( __word2vecFwd<5,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C); break; case 100005:hipLaunchKernelGGL(( __word2vecFwd<10,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C); break; default : printf("word2vecFwd unsupport size combination %d %d\n", nwa, nwb); return 1; } hipDeviceSynchronize(); int err = hipGetLastError(); return err; } int word2vecBwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C, float lrate) { dim3 threads(32*BYDIMB, 1, 1); int nblocks = min(2048, 2 + (ncols - 1)); int which = nwa*10000 + nwb; switch (which) { case 50001:hipLaunchKernelGGL(( __word2vecBwd<5,1,5>), dim3(nblocks),dim3(threads), 0, 
0, nrows, ncols, WA, WB, A, B, C, lrate); break; case 50005:hipLaunchKernelGGL(( __word2vecBwd<5,5,5>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C, lrate); break; case 100005:hipLaunchKernelGGL(( __word2vecBwd<10,5,10>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C, lrate); break; default : printf("word2vecBwd unsupport size combination %d %d\n", nwa, nwb); return 1; } hipDeviceSynchronize(); int err = hipGetLastError(); return err; }
36e78980722bc3cf74813069f120e320c990e434.cu
#include <cuda_runtime.h> #include <curand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> #define BYDIMF 2 #define CDIM 5 #define BYDIMB 5 #if __CUDA_ARCH__ >= 300 /* * Positive kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) { const int nwindow = 2*SKIP+1; int iwords[nwindow]; float aa[NREPS]; float daa[NREPS]; float bb[NREPS][nwindow]; float dbb[NREPS][nwindow]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, indx, icol, dxy, lb, ub; float prod, v, ascale, bscale; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; bool good; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); float inr = 1.0f / nrows; #pragma unroll for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers if (istart + i - SKIP - 1 >= 0) { iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word address } else { iwords[i] = -1; } good = (iwords[i] >= 0); #pragma unroll for (j = 0; j < NREPS; j++) { // Get the B vector for this word indx = tid + j * dxy; if (good && indx < nrows) { bb[j][i] = B[indx + iwords[i]]; } else { bb[j][i] = 0; } dbb[j][i] = 0; } } for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < nwindow-1; i++) { // slide iwords down iwords[i] = iwords[i+1]; #pragma unroll for (j = 0; j < NREPS; j++) { bb[j][i] = bb[j][i+1]; // slide data down dbb[j][i] = dbb[j][i+1]; // slide deriv down } } good = (icol + SKIP < ncols); if (good) { iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word address } else { iwords[nwindow - 1] = -1; } good = good && iwords[nwindow-1] >= 0; #pragma unroll for (j = 0; j < NREPS; j++) { // Get a new B column indx = tid + j * dxy; if (good && indx < 
nrows) { bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]]; } else { bb[j][nwindow - 1] = 0; } dbb[j][nwindow-1] = 0; if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column aa[j] = A[indx + iwords[SKIP]]; } else { aa[j] = 0; } } lb = LB[icol]; ub = UB[icol]; __syncthreads(); if (iwords[SKIP] >= 0) { #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols prod = 0; if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols } #pragma unroll for (k = 1; k < 32; k = k + k) { v = __shfl_down(prod, k); // Reduce within warp prod += v; } if (threadIdx.x == 0) { CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce across warps for (k = tid; k <= ub - lb; k += dxy) { CC[k] += CC[k + i * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i <= ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = 1.0f - v; // All pairs have label 1 } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { daa[j] = 0; } ascale = pow(max(0, iwords[SKIP])*inr + 1.0f, vexp); #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP && iwords[i] >= 0) { bscale = pow(max(0, iwords[i])*inr + 1.0f, vexp); v = lrate * CC[i - SKIP - lb]; #pragma unroll for (j = 0; j < NREPS; j++) { daa[j] += ascale * v * bb[j][i]; // Update A's derivative dbb[j][i] += bscale * v * aa[j]; // Update B's derivative } } } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { if (tid + j * dxy < nrows) { // Save the A column atomicAdd(&A[tid + j * dxy + iwords[SKIP]], daa[j]); atomicAdd(&B[tid + j * dxy + iwords[0]], dbb[j][0]); } } } 
__syncthreads(); } #pragma unroll for (i = 1; i < nwindow; i++) { // Clear out the derivative queue if (iwords[i] >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Save the B column if (tid + j * dxy < nrows) { atomicAdd(&B[tid + j * dxy + iwords[i]], dbb[j][i]); } } } } } /* * Convolutional kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) { const int nwindow = 2*SKIP+1; int iwords[nwindow]; float aa[NREPS]; float bb[NREPS][nwindow]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, indx, icol, dxy, lb, ub; float prod, v; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; bool good; double sum = 0; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); #pragma unroll for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers if (istart + i - SKIP - 1 >= 0) { iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word } else { iwords[i] = -1; } good = (iwords[i] >= 0); #pragma unroll for (j = 0; j < NREPS; j++) { // Get the B vector for this word indx = tid + j * dxy; if (good && indx < nrows) { bb[j][i] = B[indx + iwords[i]]; } else { bb[j][i] = 0; } } } for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < nwindow-1; i++) { // slide iwords down iwords[i] = iwords[i+1]; #pragma unroll for (j = 0; j < NREPS; j++) { bb[j][i] = bb[j][i+1]; // slide data down } } good = (icol + SKIP < ncols); if (good) { iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word } else { iwords[nwindow - 1] = -1; } good = good && iwords[nwindow-1] >= 0; #pragma unroll for (j = 0; j < NREPS; j++) { // Get a new B column indx = tid + j * dxy; if (good && indx < nrows) { bb[j][nwindow - 1] = B[indx + 
iwords[nwindow - 1]]; } else { bb[j][nwindow - 1] = 0; } if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column aa[j] = A[indx + iwords[SKIP]]; } else { aa[j] = 0; } } lb = LB[icol]; ub = UB[icol]; __syncthreads(); #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols if (i >= SKIP + lb && i <= SKIP + ub) { if (i == SKIP || iwords[SKIP] < 0 || iwords[i] < 0) { // Give this word a large score (gives zero contribution to loss) prod = 20.0f; } else { prod = 0; #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols } #pragma unroll for (k = 1; k < 32; k = k + k) { v = __shfl_down(prod, k); // Reduce within warp prod += v; } } if (threadIdx.x == 0) { CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce across warps for (k = tid; k <= ub - lb; k += dxy) { CC[k] += CC[k + i * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i <= ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = log(max(v, 1.0e-20f)); // Compute the loss } __syncthreads(); for (i = 1; i <= ub - lb; i = i + i) { if ((tid & (i-1)) == 0 && tid + i <= ub - lb) { CC[tid] += CC[tid + i]; } __syncthreads(); } sum += CC[0]; __syncthreads(); } if (tid == 0) { atomicAdd(&Retval[0], (float)sum); } } template<int NSKIP, int BYDIM> __global__ void __word2vecPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) { __shared__ float CC[NSKIP*2*BYDIM]; float aa; int ib[NSKIP*2]; float prods[NSKIP*2]; float bscale[NSKIP*2]; int ia, iword, lb, ub; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * 
ncols) / gridDim.x); int i, j, k, icol, jcol; float bb, db, dv, v, ascale, tmp; float inr = 1.0f / nrows; for (icol = istart; icol < iend; icol++) { // Iterate over columns ia = nrows * W[icol]; if (ia >= 0) { // Load lb and ub values lb = LB[icol]; ub = UB[icol]; jcol = threadIdx.x - NSKIP; iword = -1; if (jcol >= lb && jcol <= ub) { // Load words in the window iword = W[icol + jcol]; } #pragma unroll for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods ib[i] = nrows * __shfl(iword, i); ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1); prods[i] = 0; prods[i+NSKIP] = 0; } for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words aa = A[i + ia]; #pragma unroll for (j = 0; j < NSKIP*2; j++) { if (ib[j] >= 0) { bb = B[i + ib[j]]; prods[j] += aa * bb; } } } #pragma unroll for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp #pragma unroll for (k = 1; k < 32; k = k+k) { tmp = __shfl_down(prods[j], k); prods[j] += tmp; } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (j = 0; j < 2*NSKIP; j++) { CC[j + NSKIP * 2 * threadIdx.y] = prods[j]; } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps __syncthreads(); for (j = tid; j < NSKIP * 2; j += dxy) { CC[j] += CC[j + i * NSKIP * 2]; } } __syncthreads(); for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = lrate * (1 - v); // All these pairs have label 1 } __syncthreads(); // Now do scaled gradients ascale = pow(max(0, ia)*inr + 1.0f, vexp); // Simulated ADAGRAD on A for (j = 0; j < NSKIP * 2; j++) { // Load B data if (ib[j] >= 0) { bscale[j] = pow(max(0, ib[j])*inr + 1.0f, vexp); // Simulated ADAGRAD on B } else { bscale[j] = 0; } prods[j] = CC[j]; } __syncthreads(); dv = 0; for (i = tid; i < nrows; 
i += dxy) { // Update vecs with derivatives aa = A[i + ia]; #pragma unroll for (j = 0; j < NSKIP * 2; j++) { // Load B data if (ib[j] >= 0) { bb = B[i + ib[j]]; dv += ascale * prods[j] * bb; db = bscale[j] * prods[j] * aa; atomicAdd(&B[i + ib[j]], db); // Update B } } atomicAdd(&A[i + ia], dv); // Update A } __syncthreads(); } } } template<int NSKIP, int BYDIM> __global__ void __word2vecEvalPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *retval) { __shared__ float CC[NSKIP*2*BYDIM]; float aa; float prods[NSKIP*2]; int ia, iword, lb, ub; int ib[NSKIP*2]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol, jcol; float bb, v, tmp, sum; sum = 0; for (icol = istart; icol < iend; icol++) { // Iterate over columns ia = nrows * W[icol]; if (ia >= 0) { // Load lb and ub values lb = LB[icol]; ub = UB[icol]; jcol = threadIdx.x - NSKIP; iword = -1; if (jcol >= lb && jcol <= ub) { // Load words in the window iword = W[icol + jcol]; } #pragma unroll for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods ib[i] = nrows * __shfl(iword, i); ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1); prods[i] = 0; prods[i+NSKIP] = 0; } for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words aa = A[i + ia]; #pragma unroll for (j = 0; j < NSKIP*2; j++) { if (ib[j] >= 0) { bb = B[i + ib[j]]; prods[j] += aa * bb; } } } #pragma unroll for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp #pragma unroll for (k = 1; k < 32; k = k+k) { tmp = __shfl_down(prods[j], k); prods[j] += tmp; } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (j = 0; j < 2*NSKIP; j++) { CC[j + NSKIP * 2 * threadIdx.y] = prods[j]; } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce 
the products across warps __syncthreads(); for (j = tid; j < NSKIP * 2; j += dxy) { CC[j] += CC[j + i * NSKIP * 2]; } } __syncthreads(); for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = log(max(v, 1.0e-20f)); // All these pairs have label 1 } __syncthreads(); // Now sum likelihood over window for (i = 1; i < 2 * NSKIP; i = i + i) { if ((tid & (i-1)) == 0 && tid + i < 2 * NSKIP) { CC[tid] += CC[tid + i]; } __syncthreads(); } sum += CC[0]; __syncthreads(); } } if (tid == 0) { atomicAdd(&retval[0], (float)sum); } } /* * Combined forward-backward word2vec kernel */ template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BYDIM]; float aa[NWA]; float bb[NWB]; float prods[NWA][NWB]; int ia[NWA]; int ib[NWB]; float bscale[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol; float dv, v, ascale; float inr = 1.0f / nrows; for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B bb[j] = B[i + ib[j]]; } #pragma unroll for (j = 0; j < NWA; j++) { // Compute the products of these elements v = A[i + ia[j]]; #pragma unroll for (k = 0; k < NWB; k++) { 
prods[j][k] += v * bb[k]; } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = - lrate * v; // All these pairs have label 0 } __syncthreads(); for (i = tid; i < nrows; i += dxy) { #pragma unroll for (j = 0; j < NWA; j++) { // Load A data aa[j] = A[i + ia[j]]; } #pragma unroll for (k = 0; k < NWB; k++) { // Load B data bb[k] = B[i + ib[k]]; bscale[k] = pow(max(0, ib[k])*inr + 1.0f, vexp); prods[0][k] = 0; } #pragma unroll for (j = 0; j < NWA; j++) { // Now do the products ascale = pow(max(0, ia[j])*inr + 1.0f, vexp); dv = 0; #pragma unroll for (k = 0; k < NWB; k++) { v = CC[j + k * NWA]; dv += ascale * v * bb[k]; prods[0][k] += bscale[k] * v * aa[j]; } atomicAdd(&A[i + ia[j]], dv); // Update A } #pragma unroll for (k = 0; k < NWB; k++) { atomicAdd(&B[i + ib[k]], prods[0][k]); // Update B } } __syncthreads(); } } /* * Combined forward-backward word2vec kernel */ template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BYDIM]; float bb[NWB]; float prods[NWA][NWB]; int ia[NWA]; 
int ib[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol; float v; double sum = 0; for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B bb[j] = B[i + ib[j]]; } #pragma unroll for (j = 0; j < NWA; j++) { // Compute the products of these elements v = A[i + ia[j]]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += v * bb[k]; } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = log(max(1.0f - v, 1.0e-20f)); // All these pairs have label 0 } for (i = 1; i < NWA*NWB; i = i + i) { if ((tid & (i-1)) == 0 && 
tid + i < NWA*NWB) { CC[tid] += CC[tid + i]; } __syncthreads(); } sum += CC[0]; __syncthreads(); } if (tid == 0) { atomicAdd(&Retval[0], (float)sum); } } /* * Convolutional kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos_exp(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) { const int nwindow = 2*SKIP+1; float aa[NREPS]; float da[NREPS]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, icol, dxy, lb, ub, iword, cword; float bb, db, prod, v; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns iword = nrows * W[icol]; // Get the current word __syncthreads(); lb = LB[icol]; ub = UB[icol]; if (iword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get A aa[j] = A[tid + j * dxy + iword]; } else { aa[j] = 0; } } for (i = lb; i <= ub; i++) { // Iterate across the window for A cols __syncthreads(); cword = nrows * W[icol + i]; // Get the current word prod = 0; if (cword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get B col bb = B[tid + j * dxy + cword]; prod += aa[j] * bb; // Compute the product between current A, B cols } } #pragma unroll for (k = 1; k < 32; k = k + k) { prod += __shfl_down(prod, k); // Reduce within warp } } if (threadIdx.x == 0) { CC[i - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } __syncthreads(); for (j = 1; j < blockDim.y; j++) { // Reduce across warps for (i = tid; i < ub - lb; i += dxy) { CC[i] += CC[i + j * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i < ub - lb; i += 
dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = lrate * (1.0f - v); // All pairs have label 1 } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { da[j] = 0; } for (i = lb; i <= ub; i++) { // Iterate across the window for A cols cword = nrows * W[icol + i]; // Get the context word v = CC[i - lb]; if (cword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get B col bb = B[tid + j * dxy + cword]; da[j] += v * bb; db = v * aa[j]; atomicAdd(&B[tid + j * dxy + cword], db); } } } } #pragma unroll for (j = 0; j < NREPS; j++) { if (tid + j * dxy < nrows) { atomicAdd(&A[tid + j * dxy + iword], da[j]); } } } } } /* * Combined forward-backward word2vec kernel */ template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecNeg_old(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BYDIM]; float dd[MAXD]; float prods[NWA][NWB]; float aa, v, sum; int ia[NWA]; int ib[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int i, j, k, icol; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B if (ib[j] >= 0) { dd[j] = B[i + ib[j]]; } else { dd[j] = 0; } } #pragma unroll for (j = 0; j < NWA; j++) { // Compute the inner products of these elements if (ia[j] >= 0) { aa = 
A[i + ia[j]]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += aa * dd[k]; } } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = - lrate * v; // All these pairs have label 0 } __syncthreads(); for (i = tid; i < nrows; i += dxy) { #pragma unroll for (j = 0; j < NWB; j++) { // Load B data if (ib[j] >= 0) { dd[j] = B[i + ib[j]]; } else { dd[j] = 0; } } #pragma unroll for (j = 0; j < NWA; j++) { // Now do the product if (ia[j] >= 0) { sum = 0; #pragma unroll for (k = 0; k < NWB; k++) { float xx = CC[j + k * NWA]; sum += xx * dd[k]; } atomicAdd(&A[i + ia[j]], sum); } } #pragma unroll for (j = 0; j < NWA; j++) { // Load A data if (ia[j] >= 0) { dd[j] = A[i + ia[j]]; } else { dd[j] = 0; } } #pragma unroll for (j = 0; j < NWB; j++) { // Now do the product if (ib[j] >= 0) { sum = 0; #pragma unroll for (k = 0; k < NWA; k++) { float xx = CC[k + j * NWA]; sum += xx * dd[k]; } atomicAdd(&B[i + ib[j]], sum); } } } __syncthreads(); } } /* * * Simple forward kernel for word2vec. Computes inner products of columns from A with columns from B. * The column indices are specified by two "word" matrices. 
The inner products are computed as an outer product * of the word matrices. * * NWA is the number of words per column in WA * NWB is the number of words per column in WB * * Columns of the output matrix C are <window> = NWA*NWB long, and contain inner products with corresponding columns of B. * */ template<int NWA, int NWB, int BDIM> __global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BDIM]; float aa; float bb[NWB]; float prods[NWA][NWB]; int wa[NWA]; int wb[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int i, j, k, icol; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { wa[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { wb[i] = WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B bb[j] = B[i + wb[j] * nrows]; } #pragma unroll for (j = 0; j < NWA; j++) { // Computes the products of these elements aa = A[i + wa[j] * nrows]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += aa * bb[k]; } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[j + NWB * (i + NWA * threadIdx.y)] 
= prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); #pragma unroll for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWAB; i += dxy) { // Save to main memory C[i + icol * NWAB] = CC[i]; //atomicAdd(&C[i + icol * NWAB], CC[i]); } __syncthreads(); } } /* * * Simple backward kernel for word2vec. * Computes the gradient for A given B or vice-versa, and does an SGD update. * * NWA is the number of words per column in WA * NWB is the number of words per column in WB * */ template<int NWA, int NWB, int MAXDIM> __global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) { const int NWAB = NWA * NWB; float dd[MAXDIM]; int wa[NWA]; int wb[NWB]; __shared__ float cc[NWA*NWB]; int tid = threadIdx.x; int fid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int icol, i, j, k; float sum; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // iterate in columns #pragma unroll for (j = 0; j < NWA; j++) { wa[j] = WA[j + icol * NWA]; // Load the A word matrix } __syncthreads(); #pragma unroll for (j = 0; j < NWB; j++) { wb[j] = WB[j + icol * NWB]; // Load the B word matrix } for (i = fid; i < NWAB; i += dxy) { cc[i] = C[i + icol * NWAB]; } __syncthreads(); for (i = tid; i < nrows; i += dxy) { #pragma unroll for (j = 0; j < NWB; j++) { // Load the data dd[j] = B[i + wb[j] * nrows]; } #pragma unroll for (j = 0; j < NWA; j++) { // Now do the product sum = 0; #pragma unroll for (k = 0; k < NWB; k++) { float xx = cc[j + k * NWA]; sum += xx * dd[k]; } atomicAdd(&A[i + wa[j] * nrows], sum * lrate); } #pragma unroll for (j = 0; j < NWA; j++) { // Load the data dd[j] = A[i + wa[j] * nrows]; } #pragma unroll for (j = 0; j < NWB; j++) { // Now do the product sum = 0; #pragma unroll 
for (k = 0; k < NWA; k++) { float xx = cc[k + j * NWA]; sum += xx * dd[k]; } atomicAdd(&B[i + wb[j] * nrows], sum * lrate); } } } } #else template<int SKIP, int BYDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {} template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {} template<int SKIP, int BYDIM, int NREPS> __global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {} template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {} template<int NWA, int NWB, int BDIM> __global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {} template<int NWA, int NWB, int MAXDIM> __global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {} #endif int word2vecPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) { dim3 threads(32, CDIM, 1); int nblocks = min(64, ncols); switch(skip) { case 5 : __word2vecPos<5, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; case 3 : __word2vecPos<3, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; case 2 : __word2vecPos<2, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; default : printf("word2vecPos unsupport size %d\n", skip); return 1; } // cudaDeviceSynchronize(); int err = cudaGetLastError(); return err; } int word2vecNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp) { dim3 threads(32, BYDIMF, 1); int nblocks = min(2048, 2 + (ncols - 1)); int which = nwa*10000 + nwb; switch 
(which) { case 50001: __word2vecNeg<5,1,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 50005: __word2vecNeg<5,5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 100005: __word2vecNeg<10,5,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 50010: __word2vecNeg<5,10,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; // case 150010: __word2vecNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate); break; default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1; } // cudaDeviceSynchronize(); int err = cudaGetLastError(); return err; } int word2vecEvalPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float *Retval) { dim3 threads(32, CDIM, 1); int nblocks = min(64, ncols); switch(skip) { case 5 : __word2vecEvalPos<5, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; case 3 : __word2vecEvalPos<3, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; case 2 : __word2vecEvalPos<2, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; default : printf("word2vecEvalPos unsupport size %d\n", skip); return 1; } cudaDeviceSynchronize(); int err = cudaGetLastError(); return err; } int word2vecEvalNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *Retval) { dim3 threads(32, BYDIMF, 1); int nblocks = min(2048, 2 + (ncols - 1)); int which = nwa*10000 + nwb; switch (which) { case 50001: __word2vecEvalNeg<5,1,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; case 50005: __word2vecEvalNeg<5,5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; case 100005: __word2vecEvalNeg<10,5,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; case 50010: 
__word2vecEvalNeg<5,10,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; // case 150010: __word2vecEvalNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; default : printf("word2vecEvalNeg unsupport size combination %d %d\n", nwa, nwb); return 1; } cudaDeviceSynchronize(); int err = cudaGetLastError(); return err; } int word2vecFwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C) { dim3 threads(32, BYDIMF, 1); int nblocks = min(4096, 2 + (ncols - 1)); int which = nwa*10000 + nwb; switch (which) { case 50001: __word2vecFwd<5,1,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break; case 50005: __word2vecFwd<5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break; case 100005: __word2vecFwd<10,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break; default : printf("word2vecFwd unsupport size combination %d %d\n", nwa, nwb); return 1; } cudaDeviceSynchronize(); int err = cudaGetLastError(); return err; } int word2vecBwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C, float lrate) { dim3 threads(32*BYDIMB, 1, 1); int nblocks = min(2048, 2 + (ncols - 1)); int which = nwa*10000 + nwb; switch (which) { case 50001: __word2vecBwd<5,1,5><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break; case 50005: __word2vecBwd<5,5,5><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break; case 100005: __word2vecBwd<10,5,10><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break; default : printf("word2vecBwd unsupport size combination %d %d\n", nwa, nwb); return 1; } cudaDeviceSynchronize(); int err = cudaGetLastError(); return err; }
60fad3fcc6502b43da5de834ca057829ce2d1237.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <limits.h>
#include "error.h"

// GPU integer sort: per-block odd-even transposition sort followed by
// repeated cross-block merge passes.  Input is read from stdin as a binary
// count followed by that many ints; the sorted result is written to stdout.

#define BLOCK_SIZE 1024
#define WARP_SIZE 32

// Maps a logical shared-memory index to a padded physical index: one extra
// slot is inserted after every WARP_SIZE entries — presumably to avoid
// shared-memory bank conflicts (TODO confirm against target architecture).
#define idx(ind) \
    ((WARP_SIZE + 1) * (ind / WARP_SIZE) + (ind % WARP_SIZE))

// Smallest multiple of m that is >= n (host helper used to pad the input
// up to a whole number of BLOCK_SIZE-element blocks).
int next_multiple(int n, int m){
    int r = n % m;
    if(r == 0)
        return n;
    return n + (m - r);
}

// Compare-and-swap: after the call, *x holds the smaller and *y the larger
// of the two values.
__device__ void conditional_swap(int *x, int *y){
    int x_val = *x;
    int y_val = *y;
    if (x_val > y_val) {
        *y = x_val;
        *x = y_val;
    }
}

// Grid-stride fill of dev_arr[0..n) with val; used to initialize the
// padding region with INT_MIN so pad elements sort to the front.
__global__ void int_memset(int *dev_arr, int n, int val){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = gridDim.x * blockDim.x;
    for(int i = idx; i < n; i += offset)
        dev_arr[i] = val;
}

// Sorts each BLOCK_SIZE-element chunk of dev_arr in place using odd-even
// transposition sort in shared memory.  Launched with BLOCK_SIZE/2 threads
// per block; each thread loads two elements and owns one even-phase pair
// (2t, 2t+1) and one odd-phase pair (2t+1, 2t+2).
__global__ void sort_blocks(int *dev_arr){
    // +1 slot holds an INT_MAX sentinel at logical index BLOCK_SIZE so the
    // last thread's odd-phase pair (..., BLOCK_SIZE) stays in bounds.
    __shared__ int sarr[BLOCK_SIZE + (BLOCK_SIZE / WARP_SIZE) + 1];
    sarr[idx(threadIdx.x)] = dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE];
    int second_half_idx = threadIdx.x + BLOCK_SIZE / 2;
    sarr[idx(second_half_idx)] = dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE + BLOCK_SIZE / 2];
    if(threadIdx.x == 0)
        sarr[idx(BLOCK_SIZE)] = INT_MAX;
    __syncthreads();
    int swap1_idx1 = 2 * threadIdx.x;
    int swap1_idx2 = 2 * threadIdx.x + 1;
    int swap2_idx2 = 2 * threadIdx.x + 2;
    // BLOCK_SIZE rounds of (even phase, odd phase).  NOTE(review): odd-even
    // transposition needs only BLOCK_SIZE/2 such double-phases to converge,
    // so this loop does roughly twice the required work — still correct.
    for(int i = 0; i < BLOCK_SIZE; ++i){
        conditional_swap(sarr + idx(swap1_idx1), sarr + idx(swap1_idx2));
        __syncthreads();
        conditional_swap(sarr + idx(swap1_idx2), sarr + idx(swap2_idx2));
        __syncthreads();
    }
    __syncthreads();
    dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE] = sarr[idx(threadIdx.x)];
    dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE + BLOCK_SIZE / 2] = sarr[idx(second_half_idx)];
}

// Merges the two sorted BLOCK_SIZE/2-element halves of each BLOCK_SIZE
// region into one sorted run.  The second half is loaded in reverse so the
// shared buffer forms an ascending-then-descending sequence, which the
// halving-stride compare network below (bitonic-merge-style) then sorts.
// NOTE(review): parameters `iter` and `type` are unused in the body —
// presumably leftovers from an earlier scheme; confirm before removing.
__global__ void merge(int *dev_arr, int iter, int type){
    __shared__ int sarr[BLOCK_SIZE + (BLOCK_SIZE / WARP_SIZE)];
    sarr[idx(threadIdx.x)] = dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE];
    int load_second_half_idx = BLOCK_SIZE - threadIdx.x - 1;
    sarr[idx(load_second_half_idx)] = dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE + BLOCK_SIZE / 2];
    __syncthreads();
    for(int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2){
        int i = threadIdx.x / stride;
        int j = threadIdx.x % stride;
        int swap_idx1 = 2 * stride * i + j;
        int swap_idx2 = 2 * stride * i + j + stride;
        // Barrier before each compare level so the previous level's writes
        // are visible to every thread in the block.
        __syncthreads();
        conditional_swap(sarr + idx(swap_idx1), sarr + idx(swap_idx2));
    }
    __syncthreads();
    dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE] = sarr[idx(threadIdx.x)];
    int store_second_half_idx = threadIdx.x + BLOCK_SIZE / 2;;
    dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE + BLOCK_SIZE / 2] = sarr[idx(store_second_half_idx)];
}

// Host driver: pads arr up to a multiple of BLOCK_SIZE with INT_MIN,
// sorts each block on the GPU, then alternates offset/aligned merge passes
// (odd-even at half-block granularity) until the whole array is sorted.
// The padding sorts to the front, so the n real values end up in the last
// n slots of the device buffer (hence the dev_n - n read offset).
void block_odd_even_sort(int *arr, int n){
    int dev_n = next_multiple(n, BLOCK_SIZE);
    int n_blocks = dev_n / BLOCK_SIZE;
    int *dev_arr;
    CHECK_CUDA_CALL_ERROR(hipMalloc(&dev_arr, dev_n * sizeof(int)));
    CHECK_CUDA_CALL_ERROR(hipMemcpy(dev_arr, arr, n * sizeof(int), hipMemcpyHostToDevice));
    // Fill the tail padding [n, dev_n) with INT_MIN.
    hipLaunchKernelGGL(( int_memset), dim3(1), dim3(BLOCK_SIZE), 0, 0, dev_arr + n, (dev_n - n), INT_MIN);
    CHECK_CUDA_KERNEL_ERROR();
    hipLaunchKernelGGL(( sort_blocks), dim3(n_blocks), dim3(BLOCK_SIZE / 2), 0, 0, dev_arr);
    CHECK_CUDA_KERNEL_ERROR();
    if(n_blocks == 1){
        // Single block: already fully sorted, copy back and finish.
        CHECK_CUDA_CALL_ERROR(hipMemcpy(arr, dev_arr + (dev_n - n), n * sizeof(int), hipMemcpyDeviceToHost));
        CHECK_CUDA_CALL_ERROR(hipFree(dev_arr)); return;
    }
    for(int iter = 0; iter < n_blocks; ++iter){
        // Offset pass: merge regions shifted by half a block (n_blocks - 1
        // interior regions), then the aligned pass over all blocks.
        hipLaunchKernelGGL(( merge), dim3(n_blocks - 1), dim3(BLOCK_SIZE / 2), 0, 0, dev_arr + BLOCK_SIZE / 2, iter, 0);
        CHECK_CUDA_KERNEL_ERROR();
        hipLaunchKernelGGL(( merge), dim3(n_blocks), dim3(BLOCK_SIZE / 2), 0, 0, dev_arr, iter, 1);
        CHECK_CUDA_KERNEL_ERROR();
    }
    CHECK_CUDA_CALL_ERROR(hipMemcpy(arr, dev_arr + (dev_n - n), n * sizeof(int), hipMemcpyDeviceToHost));
    CHECK_CUDA_CALL_ERROR(hipFree(dev_arr));
}

// Reads a binary uint count then that many ints from stdin, sorts them on
// the GPU, and writes the sorted ints back to stdout in binary.
int main(){
    init_error_handling();
    unsigned int n;
    assert(fread(&n, sizeof(int), 1, stdin) == 1);
    if(n == 0)
        return 0;
    int *arr = (int*)malloc(n * sizeof(int));
    assert(fread(arr, sizeof(int), n, stdin) == n);
    block_odd_even_sort(arr, n);
    assert(fwrite(arr, sizeof(int), n, stdout) == n);
    free(arr);
}
60fad3fcc6502b43da5de834ca057829ce2d1237.cu
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <limits.h>
#include "error.h"

// GPU integer sort: per-block odd-even transposition sort followed by
// repeated cross-block merge passes.  Input is read from stdin as a binary
// count followed by that many ints; the sorted result is written to stdout.

#define BLOCK_SIZE 1024
#define WARP_SIZE 32

// Maps a logical shared-memory index to a padded physical index: one extra
// slot is inserted after every WARP_SIZE entries — presumably to avoid
// shared-memory bank conflicts (TODO confirm against target architecture).
#define idx(ind) \
    ((WARP_SIZE + 1) * (ind / WARP_SIZE) + (ind % WARP_SIZE))

// Smallest multiple of m that is >= n (host helper used to pad the input
// up to a whole number of BLOCK_SIZE-element blocks).
int next_multiple(int n, int m){
    int r = n % m;
    if(r == 0)
        return n;
    return n + (m - r);
}

// Compare-and-swap: after the call, *x holds the smaller and *y the larger
// of the two values.
__device__ void conditional_swap(int *x, int *y){
    int x_val = *x;
    int y_val = *y;
    if (x_val > y_val) {
        *y = x_val;
        *x = y_val;
    }
}

// Grid-stride fill of dev_arr[0..n) with val; used to initialize the
// padding region with INT_MIN so pad elements sort to the front.
__global__ void int_memset(int *dev_arr, int n, int val){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = gridDim.x * blockDim.x;
    for(int i = idx; i < n; i += offset)
        dev_arr[i] = val;
}

// Sorts each BLOCK_SIZE-element chunk of dev_arr in place using odd-even
// transposition sort in shared memory.  Launched with BLOCK_SIZE/2 threads
// per block; each thread loads two elements and owns one even-phase pair
// (2t, 2t+1) and one odd-phase pair (2t+1, 2t+2).
__global__ void sort_blocks(int *dev_arr){
    // +1 slot holds an INT_MAX sentinel at logical index BLOCK_SIZE so the
    // last thread's odd-phase pair (..., BLOCK_SIZE) stays in bounds.
    __shared__ int sarr[BLOCK_SIZE + (BLOCK_SIZE / WARP_SIZE) + 1];
    sarr[idx(threadIdx.x)] = dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE];
    int second_half_idx = threadIdx.x + BLOCK_SIZE / 2;
    sarr[idx(second_half_idx)] = dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE + BLOCK_SIZE / 2];
    if(threadIdx.x == 0)
        sarr[idx(BLOCK_SIZE)] = INT_MAX;
    __syncthreads();
    int swap1_idx1 = 2 * threadIdx.x;
    int swap1_idx2 = 2 * threadIdx.x + 1;
    int swap2_idx2 = 2 * threadIdx.x + 2;
    // BLOCK_SIZE rounds of (even phase, odd phase).  NOTE(review): odd-even
    // transposition needs only BLOCK_SIZE/2 such double-phases to converge,
    // so this loop does roughly twice the required work — still correct.
    for(int i = 0; i < BLOCK_SIZE; ++i){
        conditional_swap(sarr + idx(swap1_idx1), sarr + idx(swap1_idx2));
        __syncthreads();
        conditional_swap(sarr + idx(swap1_idx2), sarr + idx(swap2_idx2));
        __syncthreads();
    }
    __syncthreads();
    dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE] = sarr[idx(threadIdx.x)];
    dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE + BLOCK_SIZE / 2] = sarr[idx(second_half_idx)];
}

// Merges the two sorted BLOCK_SIZE/2-element halves of each BLOCK_SIZE
// region into one sorted run.  The second half is loaded in reverse so the
// shared buffer forms an ascending-then-descending sequence, which the
// halving-stride compare network below (bitonic-merge-style) then sorts.
// NOTE(review): parameters `iter` and `type` are unused in the body —
// presumably leftovers from an earlier scheme; confirm before removing.
__global__ void merge(int *dev_arr, int iter, int type){
    __shared__ int sarr[BLOCK_SIZE + (BLOCK_SIZE / WARP_SIZE)];
    sarr[idx(threadIdx.x)] = dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE];
    int load_second_half_idx = BLOCK_SIZE - threadIdx.x - 1;
    sarr[idx(load_second_half_idx)] = dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE + BLOCK_SIZE / 2];
    __syncthreads();
    for(int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2){
        int i = threadIdx.x / stride;
        int j = threadIdx.x % stride;
        int swap_idx1 = 2 * stride * i + j;
        int swap_idx2 = 2 * stride * i + j + stride;
        // Barrier before each compare level so the previous level's writes
        // are visible to every thread in the block.
        __syncthreads();
        conditional_swap(sarr + idx(swap_idx1), sarr + idx(swap_idx2));
    }
    __syncthreads();
    dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE] = sarr[idx(threadIdx.x)];
    int store_second_half_idx = threadIdx.x + BLOCK_SIZE / 2;;
    dev_arr[threadIdx.x + blockIdx.x * BLOCK_SIZE + BLOCK_SIZE / 2] = sarr[idx(store_second_half_idx)];
}

// Host driver: pads arr up to a multiple of BLOCK_SIZE with INT_MIN,
// sorts each block on the GPU, then alternates offset/aligned merge passes
// (odd-even at half-block granularity) until the whole array is sorted.
// The padding sorts to the front, so the n real values end up in the last
// n slots of the device buffer (hence the dev_n - n read offset).
void block_odd_even_sort(int *arr, int n){
    int dev_n = next_multiple(n, BLOCK_SIZE);
    int n_blocks = dev_n / BLOCK_SIZE;
    int *dev_arr;
    CHECK_CUDA_CALL_ERROR(cudaMalloc(&dev_arr, dev_n * sizeof(int)));
    CHECK_CUDA_CALL_ERROR(cudaMemcpy(dev_arr, arr, n * sizeof(int), cudaMemcpyHostToDevice));
    // Fill the tail padding [n, dev_n) with INT_MIN.
    int_memset<<<1, BLOCK_SIZE>>>(dev_arr + n, (dev_n - n), INT_MIN);
    CHECK_CUDA_KERNEL_ERROR();
    sort_blocks<<<n_blocks, BLOCK_SIZE / 2>>>(dev_arr);
    CHECK_CUDA_KERNEL_ERROR();
    if(n_blocks == 1){
        // Single block: already fully sorted, copy back and finish.
        CHECK_CUDA_CALL_ERROR(cudaMemcpy(arr, dev_arr + (dev_n - n), n * sizeof(int), cudaMemcpyDeviceToHost));
        CHECK_CUDA_CALL_ERROR(cudaFree(dev_arr)); return;
    }
    for(int iter = 0; iter < n_blocks; ++iter){
        // Offset pass: merge regions shifted by half a block (n_blocks - 1
        // interior regions), then the aligned pass over all blocks.
        merge<<<n_blocks - 1, BLOCK_SIZE / 2>>>(dev_arr + BLOCK_SIZE / 2, iter, 0);
        CHECK_CUDA_KERNEL_ERROR();
        merge<<<n_blocks, BLOCK_SIZE / 2>>>(dev_arr, iter, 1);
        CHECK_CUDA_KERNEL_ERROR();
    }
    CHECK_CUDA_CALL_ERROR(cudaMemcpy(arr, dev_arr + (dev_n - n), n * sizeof(int), cudaMemcpyDeviceToHost));
    CHECK_CUDA_CALL_ERROR(cudaFree(dev_arr));
}

// Reads a binary uint count then that many ints from stdin, sorts them on
// the GPU, and writes the sorted ints back to stdout in binary.
int main(){
    init_error_handling();
    unsigned int n;
    assert(fread(&n, sizeof(int), 1, stdin) == 1);
    if(n == 0)
        return 0;
    int *arr = (int*)malloc(n * sizeof(int));
    assert(fread(arr, sizeof(int), n, stdin) == n);
    block_odd_even_sort(arr, n);
    assert(fwrite(arr, sizeof(int), n, stdout) == n);
    free(arr);
}
c2518f277d36c4da59030a0036fc45a6412b8342.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// -*- coding:utf-8 -*-
/*
 * Copyright (C) 2016 Sony Corporation
 * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Sony Corporation;
 * the contents of this file is not to be disclosed to third parties, copied
 * or duplicated in any form, in whole or in part, without the prior written
 * permission of Sony Corporation.
 */

// GPU implementation of the MeanSubtraction function: subtracts a running
// per-feature mean from the input.  In training mode the running mean is
// updated from the current batch; in testing mode it is used read-only.

#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/mean_subtraction.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>

namespace nbla {

// Dispatches to the batch (training) or global (testing) forward path
// depending on whether the running mean should be updated.
template <typename T>
void MeanSubtractionCuda<T>::forward_impl(const Variables &inputs,
                                          const Variables &outputs) {
  cuda_set_device(std::stoi(this->ctx_.device_id));
  if (this->update_runing_mean_) { // Training mode.
    forward_impl_batch(inputs, outputs);
  } else { // Testing mode.
    forward_impl_global(inputs, outputs);
  }
}

// Increments the batch counter t[0] by one, saturating at `max` so the
// 1/(t+1) moving-average coefficient never wraps around.
template <typename T>
__global__ void kernel_mean_subtraction_inc_t(T *t, const int max) {
  if (t[0] < max) {
    t[0] = t[0] + 1;
  }
}

// For each feature index i1 (of size1_ features): computes the batch mean
// over the size0_ batch entries, folds it into the running mean rm with
// coefficient 1/(t+1), and writes x - rm to the output.
template <typename T>
__global__ void
kernel_mean_subtraction_forward_batch(const int size1_, const int size0_,
                                      const T *x, T *m, T *rm, T *y,
                                      const int *t) {
  NBLA_CUDA_KERNEL_LOOP(i1, size1_) {
    T coef = 1.0 / ((*t) + 1);

    // Batch mean
    T mean = 0;
    for (int i0 = 0; i0 < size0_; ++i0) {
      mean += x[i1 + i0 * size1_];
    }
    m[i1] = mean / size0_;

    // Moving mean
    rm[i1] = rm[i1] + (m[i1] - rm[i1]) * coef;

    // Output
    for (int i0 = 0; i0 < size0_; ++i0) {
      y[i1 + i0 * size1_] = x[i1 + i0 * size1_] - rm[i1];
    }
  }
}

// Training-mode forward: launches the batch kernel, then bumps the batch
// counter (both on the default stream, so they run in issue order).
template <class T>
void MeanSubtractionCuda<T>::forward_impl_batch(const Variables &inputs,
                                                const Variables &outputs) {
  // Inputs
  const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
  // Output
  T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);
  Variable *batch_mean = &this->mean_;
  T *m = batch_mean->cast_data_and_get_pointer<T>(this->ctx_); // batch mean
  // Inputs/Outputs
  T *rm = inputs[1]->cast_data_and_get_pointer<T>(this->ctx_); // running mean
  int *t =
      inputs[2]->cast_data_and_get_pointer<int>(this->ctx_); // running count

  NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_mean_subtraction_forward_batch,
                                 this->size1_, this->size0_, x, m, rm, y, t);
  hipLaunchKernelGGL(( kernel_mean_subtraction_inc_t), dim3(1), dim3(1), 0, 0, t, std::numeric_limits<int>::max());
}

// Testing-mode forward: subtracts the (frozen) running mean from x.
template <typename T>
__global__ void
kernel_mean_subtraction_forward_global(const int size1_, const int size0_,
                                       const T *x, const T *rm, T *y) {
  NBLA_CUDA_KERNEL_LOOP(i1, size1_) {
    for (int i0 = 0; i0 < size0_; ++i0) {
      y[i1 + i0 * size1_] = x[i1 + i0 * size1_] - rm[i1];
    }
  }
}

template <class T>
void MeanSubtractionCuda<T>::forward_impl_global(const Variables &inputs,
                                                 const Variables &outputs) {
  // Inputs
  const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
  const T *rm = inputs[1]->get_data_pointer<T>(this->ctx_); // running mean
  // Output
  T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);

  NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_mean_subtraction_forward_global,
                                 this->size1_, this->size0_, x, rm, y);
}

// Backward dispatch mirrors forward_impl: batch path in training mode,
// global path in testing mode.
template <typename T>
void MeanSubtractionCuda<T>::backward_impl(const Variables &inputs,
                                           const Variables &outputs,
                                           const vector<bool> &propagate_down,
                                           const vector<bool> &accum) {
  cuda_set_device(std::stoi(this->ctx_.device_id));
  if (this->update_runing_mean_) { // Training mode.
    backward_impl_batch(inputs, outputs, propagate_down, accum);
  } else { // Testing mode.
    backward_impl_global(inputs, outputs, propagate_down, accum);
  }
}

// Training-mode gradient: dx = dy * (1 - 1/(t * size0_)), optionally
// accumulated into the existing dx (accum template flag).
template <typename T, bool accum>
__global__ void kernel_mean_subtraction_backward_batch(const int num, T *dx,
                                                       const T *dy,
                                                       const int *t,
                                                       const int size0_) {
  const T factor = (T)1.0 / ((*t) * size0_);
  NBLA_CUDA_KERNEL_LOOP(idx, num) {
    dx[idx] = (accum ? dx[idx] : 0) + dy[idx] * (1 - factor);
  }
}

template <class T>
void MeanSubtractionCuda<T>::backward_impl_batch(
    const Variables &inputs, const Variables &outputs,
    const vector<bool> &propagate_down, const vector<bool> &accum) {
  if (!propagate_down[0]) {
    return;
  }
  const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_);
  T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
  const int *t = inputs[2]->get_data_pointer<int>(this->ctx_);
  size_t size = inputs[0]->size();
  if (accum[0]) {
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_mean_subtraction_backward_batch<T, true>), size, dx, dy, t,
        this->size0_);
  } else {
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_mean_subtraction_backward_batch<T, false>), size, dx, dy, t,
        this->size0_);
  }
}

// Testing-mode gradient: the running mean is constant, so dx = dy
// (optionally accumulated).
template <typename T, bool accum>
__global__ void kernel_mean_subtraction_backward_global(const int num, T *dx,
                                                        const T *dy) {
  NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : 0) + dy[idx]; }
}

template <class T>
void MeanSubtractionCuda<T>::backward_impl_global(
    const Variables &inputs, const Variables &outputs,
    const vector<bool> &propagate_down, const vector<bool> &accum) {
  if (!propagate_down[0]) {
    return;
  }
  const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_);
  T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
  size_t size = inputs[0]->size();
  if (accum[0]) {
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_mean_subtraction_backward_global<T, true>), size, dx, dy);
  } else {
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_mean_subtraction_backward_global<T, false>), size, dx, dy);
  }
}

// template instantiation
template class MeanSubtractionCuda<float>;
}
c2518f277d36c4da59030a0036fc45a6412b8342.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// -*- coding:utf-8 -*-
/*
 * Copyright (C) 2016 Sony Corporation
 * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Sony Corporation;
 * the contents of this file is not to be disclosed to third parties, copied
 * or duplicated in any form, in whole or in part, without the prior written
 * permission of Sony Corporation.
 */

// GPU implementation of the MeanSubtraction function: subtracts a running
// per-feature mean from the input.  In training mode the running mean is
// updated from the current batch; in testing mode it is used read-only.

#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/mean_subtraction.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>

namespace nbla {

// Dispatches to the batch (training) or global (testing) forward path
// depending on whether the running mean should be updated.
template <typename T>
void MeanSubtractionCuda<T>::forward_impl(const Variables &inputs,
                                          const Variables &outputs) {
  cuda_set_device(std::stoi(this->ctx_.device_id));
  if (this->update_runing_mean_) { // Training mode.
    forward_impl_batch(inputs, outputs);
  } else { // Testing mode.
    forward_impl_global(inputs, outputs);
  }
}

// Increments the batch counter t[0] by one, saturating at `max` so the
// 1/(t+1) moving-average coefficient never wraps around.
template <typename T>
__global__ void kernel_mean_subtraction_inc_t(T *t, const int max) {
  if (t[0] < max) {
    t[0] = t[0] + 1;
  }
}

// For each feature index i1 (of size1_ features): computes the batch mean
// over the size0_ batch entries, folds it into the running mean rm with
// coefficient 1/(t+1), and writes x - rm to the output.
template <typename T>
__global__ void
kernel_mean_subtraction_forward_batch(const int size1_, const int size0_,
                                      const T *x, T *m, T *rm, T *y,
                                      const int *t) {
  NBLA_CUDA_KERNEL_LOOP(i1, size1_) {
    T coef = 1.0 / ((*t) + 1);

    // Batch mean
    T mean = 0;
    for (int i0 = 0; i0 < size0_; ++i0) {
      mean += x[i1 + i0 * size1_];
    }
    m[i1] = mean / size0_;

    // Moving mean
    rm[i1] = rm[i1] + (m[i1] - rm[i1]) * coef;

    // Output
    for (int i0 = 0; i0 < size0_; ++i0) {
      y[i1 + i0 * size1_] = x[i1 + i0 * size1_] - rm[i1];
    }
  }
}

// Training-mode forward: launches the batch kernel, then bumps the batch
// counter (both on the default stream, so they run in issue order).
template <class T>
void MeanSubtractionCuda<T>::forward_impl_batch(const Variables &inputs,
                                                const Variables &outputs) {
  // Inputs
  const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
  // Output
  T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);
  Variable *batch_mean = &this->mean_;
  T *m = batch_mean->cast_data_and_get_pointer<T>(this->ctx_); // batch mean
  // Inputs/Outputs
  T *rm = inputs[1]->cast_data_and_get_pointer<T>(this->ctx_); // running mean
  int *t =
      inputs[2]->cast_data_and_get_pointer<int>(this->ctx_); // running count

  NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_mean_subtraction_forward_batch,
                                 this->size1_, this->size0_, x, m, rm, y, t);
  kernel_mean_subtraction_inc_t<<<1, 1>>>(t, std::numeric_limits<int>::max());
}

// Testing-mode forward: subtracts the (frozen) running mean from x.
template <typename T>
__global__ void
kernel_mean_subtraction_forward_global(const int size1_, const int size0_,
                                       const T *x, const T *rm, T *y) {
  NBLA_CUDA_KERNEL_LOOP(i1, size1_) {
    for (int i0 = 0; i0 < size0_; ++i0) {
      y[i1 + i0 * size1_] = x[i1 + i0 * size1_] - rm[i1];
    }
  }
}

template <class T>
void MeanSubtractionCuda<T>::forward_impl_global(const Variables &inputs,
                                                 const Variables &outputs) {
  // Inputs
  const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
  const T *rm = inputs[1]->get_data_pointer<T>(this->ctx_); // running mean
  // Output
  T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);

  NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_mean_subtraction_forward_global,
                                 this->size1_, this->size0_, x, rm, y);
}

// Backward dispatch mirrors forward_impl: batch path in training mode,
// global path in testing mode.
template <typename T>
void MeanSubtractionCuda<T>::backward_impl(const Variables &inputs,
                                           const Variables &outputs,
                                           const vector<bool> &propagate_down,
                                           const vector<bool> &accum) {
  cuda_set_device(std::stoi(this->ctx_.device_id));
  if (this->update_runing_mean_) { // Training mode.
    backward_impl_batch(inputs, outputs, propagate_down, accum);
  } else { // Testing mode.
    backward_impl_global(inputs, outputs, propagate_down, accum);
  }
}

// Training-mode gradient: dx = dy * (1 - 1/(t * size0_)), optionally
// accumulated into the existing dx (accum template flag).
template <typename T, bool accum>
__global__ void kernel_mean_subtraction_backward_batch(const int num, T *dx,
                                                       const T *dy,
                                                       const int *t,
                                                       const int size0_) {
  const T factor = (T)1.0 / ((*t) * size0_);
  NBLA_CUDA_KERNEL_LOOP(idx, num) {
    dx[idx] = (accum ? dx[idx] : 0) + dy[idx] * (1 - factor);
  }
}

template <class T>
void MeanSubtractionCuda<T>::backward_impl_batch(
    const Variables &inputs, const Variables &outputs,
    const vector<bool> &propagate_down, const vector<bool> &accum) {
  if (!propagate_down[0]) {
    return;
  }
  const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_);
  T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
  const int *t = inputs[2]->get_data_pointer<int>(this->ctx_);
  size_t size = inputs[0]->size();
  if (accum[0]) {
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_mean_subtraction_backward_batch<T, true>), size, dx, dy, t,
        this->size0_);
  } else {
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_mean_subtraction_backward_batch<T, false>), size, dx, dy, t,
        this->size0_);
  }
}

// Testing-mode gradient: the running mean is constant, so dx = dy
// (optionally accumulated).
template <typename T, bool accum>
__global__ void kernel_mean_subtraction_backward_global(const int num, T *dx,
                                                        const T *dy) {
  NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : 0) + dy[idx]; }
}

template <class T>
void MeanSubtractionCuda<T>::backward_impl_global(
    const Variables &inputs, const Variables &outputs,
    const vector<bool> &propagate_down, const vector<bool> &accum) {
  if (!propagate_down[0]) {
    return;
  }
  const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_);
  T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
  size_t size = inputs[0]->size();
  if (accum[0]) {
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_mean_subtraction_backward_global<T, true>), size, dx, dy);
  } else {
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_mean_subtraction_backward_global<T, false>), size, dx, dy);
  }
}

// template instantiation
template class MeanSubtractionCuda<float>;
}
e0d5c1f03a23d3c2a8044098a815b6948dd13868.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "StdAfx.h"
#include "CudaMD5.h"

// GPU MD5: computes the MD5 digest of each string in a batch, one string
// per thread.  Input strings are packed into one device buffer with an
// (offset, length) header per string; output is 16 raw digest bytes per
// string, hex-formatted on the host.

#define HASH_SIZE sizeof(char) * 16
#define HASH_STRING_SIZE 33

// MD5 sine-derived constants T[i] = floor(2^32 * |sin(i+1)|) (RFC 1321).
__constant__ uint g_auiTArray[64] = {
    0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
    0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
    0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
    0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
    0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
    0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
    0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
    0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
    0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
    0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
    0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05,
    0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
    0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
    0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
    0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
    0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
};

// Standard MD5 padding: a single 0x80 byte followed by zeros.
__constant__ uchar g_aucPad[64] = {
    0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

// Per-thread MD5 streaming state.
typedef struct
{
    uint uiCount[2];    /* message length in bits, lsw first */
    uint uiABCD[4];     /* digest buffer */
    uchar szBuffer[64]; /* accumulate block */
} CMD5State;

// Packs the input strings into one device buffer laid out as:
//   [int offset, int length] x N header, then NUL-terminated string bytes.
// NOTE(review): the payload size is computed with sizeof(char *) rather
// than sizeof(char) — this over-allocates (pointer-size x bytes) but is
// otherwise harmless; confirm whether sizeof(char) was intended.
void CreateDeviceInputData(const string_v &vsInputStrings, char *&d_pData)
{
    sizeint siDataSize = vsInputStrings.size();
    sizeint siTotalElemsCount = 0;
    for (auto &sValue : vsInputStrings)
    {
        siTotalElemsCount += sValue.length();
    }
    sizeint siHeaderSize = siDataSize * 2 * sizeof(int);
    sizeint siTotalDataSize = siHeaderSize + (siTotalElemsCount + siDataSize) * sizeof(char *); // + siDataSize -- needed for '\0' last char
    char *pcTempData = (char *)malloc(siTotalDataSize);
    ASSERTE(pcTempData);
    int *piHeader = (int *)pcTempData;
    sizeint siDataOffset = siHeaderSize;
    for (sizeint siIndex = 0; siIndex < siDataSize; ++siIndex)
    {
        sizeint siElementLength = vsInputStrings[siIndex].length();
        piHeader[siIndex * 2] = siDataOffset;
        piHeader[siIndex * 2 + 1] = siElementLength;
        strcpy(pcTempData + siDataOffset, vsInputStrings[siIndex].c_str());
        siDataOffset += siElementLength + 1;
    }
    cudaErrorCheck(hipMalloc(&d_pData, siTotalDataSize));
    cudaErrorCheck(hipMemcpy(d_pData, pcTempData, siTotalDataSize, hipMemcpyHostToDevice));
    free(pcTempData);
}

// Allocates the device output buffer: 16 raw digest bytes per string.
void CreateDeviceResultData(sizeint siStringsCount, char *&d_pData)
{
    sizeint siDataSize = siStringsCount * HASH_SIZE;
    cudaErrorCheck(hipMalloc(&d_pData, siDataSize));
}

// Releases a buffer previously allocated on the device.
void FreeDeviceData(char *d_pData)
{
    cudaErrorCheck(hipFree(d_pData));
}

// Copies the raw 16-byte digests back to the host and formats each as a
// 32-character lowercase hex string.
void ConvertDeviceResultToVector(char *d_pResults, sizeint siStringsCount, string_v &vsOutResults)
{
    sizeint siHashSize = HASH_SIZE;
    sizeint siDataSize = siStringsCount * siHashSize;
    char *h_pData = (char *)malloc(siDataSize);
    ASSERTE(h_pData);
    cudaErrorCheck(hipMemcpy(h_pData, d_pResults, siDataSize, hipMemcpyDeviceToHost));
    string_v vsHashes;
    char szBuffer[33];
    uchar *pchMd5Hash = (uchar *)h_pData;
    for (sizeint idx = 0; idx < siStringsCount; ++idx)
    {
        sprintf(szBuffer, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
            pchMd5Hash[0], pchMd5Hash[1], pchMd5Hash[2], pchMd5Hash[3], pchMd5Hash[4], pchMd5Hash[5], pchMd5Hash[6], pchMd5Hash[7],
            pchMd5Hash[8], pchMd5Hash[9], pchMd5Hash[10], pchMd5Hash[11], pchMd5Hash[12], pchMd5Hash[13], pchMd5Hash[14], pchMd5Hash[15]);
        vsHashes.push_back(szBuffer);
        pchMd5Hash += siHashSize;
    }
    free(h_pData);
    vsOutResults.swap(vsHashes);
}

// Resets the MD5 state to the RFC 1321 initialization vector.
__device__ void MD5Init(CMD5State *pmdMD5State)
{
    pmdMD5State->uiCount[0] = pmdMD5State->uiCount[1] = 0;
    pmdMD5State->uiABCD[0] = 0x67452301;
    pmdMD5State->uiABCD[1] = 0xefcdab89;
    pmdMD5State->uiABCD[2] = 0x98badcfe;
    pmdMD5State->uiABCD[3] = 0x10325476;
}

// Core MD5 compression function: mixes one 64-byte block into the state
// via the four 16-step rounds of RFC 1321.
__device__ void MD5Process(CMD5State *pmdMD5State, const uchar *pucData /*[64]*/)
{
    uint a = pmdMD5State->uiABCD[0], b = pmdMD5State->uiABCD[1];
    uint c = pmdMD5State->uiABCD[2], d = pmdMD5State->uiABCD[3];
    uint t;
#ifdef ARCH_IS_BIG_ENDIAN
    /* On big-endian machines, we must arrange the bytes in the right order.
       (This also works on machines of unknown byte order.) */
    uint puiX[16];
    const uchar *pucXP = pucData;
    for (sizeint idx = 0; idx < 16; ++idx, pucXP += 4)
    {
        puiX[idx] = pucXP[0] + (pucXP[1] << 8) + (pucXP[2] << 16) + (pucXP[3] << 24);
    }
#else
    /* On little-endian machines, we can process properly aligned data without copying it. */
    uint auiXBuf[16];
    const uint *puiX;
    if (!((pucData - (const uchar *)0) & 3))
    {
        puiX = (const uint *)pucData; /* data are properly aligned */
    }
    else
    {
        memcpy(auiXBuf, pucData, 64); /* not aligned */
        puiX = auiXBuf;
    }
#endif
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
    /* Round 1. Let [abcd k s i] denote the operation --
       a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */
#define F(x, y, z) (((x) & (y)) | (~(x) & (z)))
#define SET(a, b, c, d, k, s, Ti) \
    t = a + F(b,c,d) + puiX[k] + Ti; \
    a = ROTATE_LEFT(t, s) + b
    /* Do the following 16 operations. */
    SET(a, b, c, d, 0, 7, g_auiTArray[0]);
    SET(d, a, b, c, 1, 12, g_auiTArray[1]);
    SET(c, d, a, b, 2, 17, g_auiTArray[2]);
    SET(b, c, d, a, 3, 22, g_auiTArray[3]);
    SET(a, b, c, d, 4, 7, g_auiTArray[4]);
    SET(d, a, b, c, 5, 12, g_auiTArray[5]);
    SET(c, d, a, b, 6, 17, g_auiTArray[6]);
    SET(b, c, d, a, 7, 22, g_auiTArray[7]);
    SET(a, b, c, d, 8, 7, g_auiTArray[8]);
    SET(d, a, b, c, 9, 12, g_auiTArray[9]);
    SET(c, d, a, b, 10, 17, g_auiTArray[10]);
    SET(b, c, d, a, 11, 22, g_auiTArray[11]);
    SET(a, b, c, d, 12, 7, g_auiTArray[12]);
    SET(d, a, b, c, 13, 12, g_auiTArray[13]);
    SET(c, d, a, b, 14, 17, g_auiTArray[14]);
    SET(b, c, d, a, 15, 22, g_auiTArray[15]);
#undef SET
    /* Round 2. Let [abcd k s i] denote the operation --
       a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */
#define G(x, y, z) (((x) & (z)) | ((y) & ~(z)))
#define SET(a, b, c, d, k, s, Ti)\
    t = a + G(b,c,d) + puiX[k] + Ti;\
    a = ROTATE_LEFT(t, s) + b
    /* Do the following 16 operations. */
    SET(a, b, c, d, 1, 5, g_auiTArray[16]);
    SET(d, a, b, c, 6, 9, g_auiTArray[17]);
    SET(c, d, a, b, 11, 14, g_auiTArray[18]);
    SET(b, c, d, a, 0, 20, g_auiTArray[19]);
    SET(a, b, c, d, 5, 5, g_auiTArray[20]);
    SET(d, a, b, c, 10, 9, g_auiTArray[21]);
    SET(c, d, a, b, 15, 14, g_auiTArray[22]);
    SET(b, c, d, a, 4, 20, g_auiTArray[23]);
    SET(a, b, c, d, 9, 5, g_auiTArray[24]);
    SET(d, a, b, c, 14, 9, g_auiTArray[25]);
    SET(c, d, a, b, 3, 14, g_auiTArray[26]);
    SET(b, c, d, a, 8, 20, g_auiTArray[27]);
    SET(a, b, c, d, 13, 5, g_auiTArray[28]);
    SET(d, a, b, c, 2, 9, g_auiTArray[29]);
    SET(c, d, a, b, 7, 14, g_auiTArray[30]);
    SET(b, c, d, a, 12, 20, g_auiTArray[31]);
#undef SET
    /* Round 3. Let [abcd k s t] denote the operation --
       a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define SET(a, b, c, d, k, s, Ti)\
    t = a + H(b,c,d) + puiX[k] + Ti;\
    a = ROTATE_LEFT(t, s) + b
    /* Do the following 16 operations. */
    SET(a, b, c, d, 5, 4, g_auiTArray[32]);
    SET(d, a, b, c, 8, 11, g_auiTArray[33]);
    SET(c, d, a, b, 11, 16, g_auiTArray[34]);
    SET(b, c, d, a, 14, 23, g_auiTArray[35]);
    SET(a, b, c, d, 1, 4, g_auiTArray[36]);
    SET(d, a, b, c, 4, 11, g_auiTArray[37]);
    SET(c, d, a, b, 7, 16, g_auiTArray[38]);
    SET(b, c, d, a, 10, 23, g_auiTArray[39]);
    SET(a, b, c, d, 13, 4, g_auiTArray[40]);
    SET(d, a, b, c, 0, 11, g_auiTArray[41]);
    SET(c, d, a, b, 3, 16, g_auiTArray[42]);
    SET(b, c, d, a, 6, 23, g_auiTArray[43]);
    SET(a, b, c, d, 9, 4, g_auiTArray[44]);
    SET(d, a, b, c, 12, 11, g_auiTArray[45]);
    SET(c, d, a, b, 15, 16, g_auiTArray[46]);
    SET(b, c, d, a, 2, 23, g_auiTArray[47]);
#undef SET
    /* Round 4. Let [abcd k s t] denote the operation --
       a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */
#define I(x, y, z) ((y) ^ ((x) | ~(z)))
#define SET(a, b, c, d, k, s, Ti)\
    t = a + I(b,c,d) + puiX[k] + Ti;\
    a = ROTATE_LEFT(t, s) + b
    /* Do the following 16 operations. */
    SET(a, b, c, d, 0, 6, g_auiTArray[48]);
    SET(d, a, b, c, 7, 10, g_auiTArray[49]);
    SET(c, d, a, b, 14, 15, g_auiTArray[50]);
    SET(b, c, d, a, 5, 21, g_auiTArray[51]);
    SET(a, b, c, d, 12, 6, g_auiTArray[52]);
    SET(d, a, b, c, 3, 10, g_auiTArray[53]);
    SET(c, d, a, b, 10, 15, g_auiTArray[54]);
    SET(b, c, d, a, 1, 21, g_auiTArray[55]);
    SET(a, b, c, d, 8, 6, g_auiTArray[56]);
    SET(d, a, b, c, 15, 10, g_auiTArray[57]);
    SET(c, d, a, b, 6, 15, g_auiTArray[58]);
    SET(b, c, d, a, 13, 21, g_auiTArray[59]);
    SET(a, b, c, d, 4, 6, g_auiTArray[60]);
    SET(d, a, b, c, 11, 10, g_auiTArray[61]);
    SET(c, d, a, b, 2, 15, g_auiTArray[62]);
    SET(b, c, d, a, 9, 21, g_auiTArray[63]);
#undef SET
    /* Then perform the following additions. (That is increment each of the
       four registers by the value it had before this block was started.) */
    pmdMD5State->uiABCD[0] += a;
    pmdMD5State->uiABCD[1] += b;
    pmdMD5State->uiABCD[2] += c;
    pmdMD5State->uiABCD[3] += d;
}

// Feeds nBytes of aucData into the MD5 state, buffering partial blocks and
// compressing each full 64-byte block.  do/while(false) is used for early
// exit instead of return.
__device__ void MD5Append(CMD5State *pmdMD5State, const uchar *aucData, int nBytes)
{
    do
    {
        const uchar *pData = aucData;
        int nLeftBytes = nBytes;
        int nOffset = (pmdMD5State->uiCount[0] >> 3) & 63;
        uint uiBits = (uint)(nBytes << 3);
        if (nBytes <= 0)
        {
            break;
        }
        /* Update the message length. */
        pmdMD5State->uiCount[1] += nBytes >> 29;
        pmdMD5State->uiCount[0] += uiBits;
        if (pmdMD5State->uiCount[0] < uiBits)
        {
            pmdMD5State->uiCount[1]++;
        }
        /* Process an initial partial block. */
        if (nOffset)
        {
            int nCopy = (nOffset + nBytes > 64 ? 64 - nOffset : nBytes);
            memcpy(pmdMD5State->szBuffer + nOffset, pData, nCopy);
            if (nOffset + nCopy < 64)
            {
                break;
            }
            pData += nCopy;
            nLeftBytes -= nCopy;
            MD5Process(pmdMD5State, pmdMD5State->szBuffer);
        }
        /* Process full blocks. */
        for (; nLeftBytes >= 64; pData += 64, nLeftBytes -= 64)
        {
            MD5Process(pmdMD5State, pData);
        }
        /* Process a final partial block. */
        if (nLeftBytes)
        {
            memcpy(pmdMD5State->szBuffer, pData, nLeftBytes);
        }
    }
    while (false);
}

// Finalizes the digest: appends padding plus the 64-bit bit-length, then
// serializes the state words little-endian into aucDigest.
__device__ void MD5Finish(CMD5State *pmdMD5State, uchar aucDigest[16])
{
    uchar aucData[8];
    /* Save the length before padding. */
    for (sizeint i = 0; i < 8; ++i)
    {
        aucData[i] = (uchar)(pmdMD5State->uiCount[i >> 2] >> ((i & 3) << 3));
    }
    /* Pad to 56 bytes mod 64. */
    MD5Append(pmdMD5State, g_aucPad, ((55 - (pmdMD5State->uiCount[0] >> 3)) & 63) + 1);
    /* Append the length. */
    MD5Append(pmdMD5State, aucData, 8);
    for (sizeint i = 0; i < 16; ++i)
    {
        aucDigest[i] = (uchar)(pmdMD5State->uiABCD[i >> 2] >> ((i & 3) << 3));
    }
}

// One-shot MD5 of a uiLength-byte message; writes the 16 raw digest bytes
// into pcResultData.
__device__ void CalculateMD5(const char *szData, char *pcResultData, uint uiLength)
{
    CMD5State msMD5State;
    MD5Init(&msMD5State);
    MD5Append(&msMD5State, (const uchar *)szData, uiLength);
    const int siHashSize = HASH_SIZE;
    uchar aucDigest[siHashSize];
    MD5Finish(&msMD5State, aucDigest);
    for (int idx = 0; idx < siHashSize; ++idx)
    {
        pcResultData[idx] = aucDigest[idx];
    }
    //printf("%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
    //	aucDigest[0], aucDigest[1], aucDigest[2], aucDigest[3], aucDigest[4], aucDigest[5], aucDigest[6], aucDigest[7],
    //	aucDigest[8], aucDigest[9], aucDigest[10], aucDigest[11], aucDigest[12], aucDigest[13], aucDigest[14], aucDigest[15]);
}

// One thread per string: reads (offset, length) from the packed header and
// hashes the corresponding string into its slot of pcResultData.
// NOTE(review): only threadIdx.x is used, matching the single-block launch
// below — batches larger than the max threads-per-block would silently be
// truncated; confirm the intended batch limit.
__global__ void CalculateMd5Kernel(const char *pcData, char *pcResultData, sizeint siDataSize)
{
    int tid = threadIdx.x;
    if (tid < siDataSize)
    {
        int idx = tid << 1;
        uint uiOffset = ((int *)pcData)[idx];
        uint uiLength = ((int *)pcData)[idx + 1];
        const char *pszCurrentString = pcData + uiOffset;
        char *pszResultData = pcResultData + tid * HASH_SIZE;
        CalculateMD5(pszCurrentString, pszResultData, uiLength);
    }
}

// Host entry point: packs the input strings onto the device, hashes them
// all in one kernel launch, and returns the hex digests in vsResults.
void CalculateMd5(const string_v &vsInputStrings, string_v &vsResults)
{
    char *d_pData = NULL, *d_pResults = NULL;
    sizeint siStringsCount = vsInputStrings.size();
    CreateDeviceInputData(vsInputStrings, d_pData);
    CreateDeviceResultData(siStringsCount, d_pResults);
    ASSERTE(d_pData && d_pResults);
    hipLaunchKernelGGL(( CalculateMd5Kernel), dim3(1), dim3(siStringsCount), 0, 0, d_pData, d_pResults, siStringsCount);
    cudaErrorCheck(hipGetLastError());
    cudaErrorCheck(hipDeviceSynchronize());
    ConvertDeviceResultToVector(d_pResults, siStringsCount, vsResults);
    FreeDeviceData(d_pData);
    FreeDeviceData(d_pResults);
}
e0d5c1f03a23d3c2a8044098a815b6948dd13868.cu
#include "StdAfx.h" #include "CudaMD5.h" #define HASH_SIZE sizeof(char) * 16 #define HASH_STRING_SIZE 33 __constant__ uint g_auiTArray[64] = { 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee, 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501, 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be, 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821, 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa, 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8, 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed, 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a, 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c, 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70, 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05, 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665, 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039, 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1, 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1, 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391, }; __constant__ uchar g_aucPad[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; typedef struct { uint uiCount[2]; /* message length in bits, lsw first */ uint uiABCD[4]; /* digest buffer */ uchar szBuffer[64]; /* accumulate block */ } CMD5State; void CreateDeviceInputData(const string_v &vsInputStrings, char *&d_pData) { sizeint siDataSize = vsInputStrings.size(); sizeint siTotalElemsCount = 0; for (auto &sValue : vsInputStrings) { siTotalElemsCount += sValue.length(); } sizeint siHeaderSize = siDataSize * 2 * sizeof(int); sizeint siTotalDataSize = siHeaderSize + (siTotalElemsCount + siDataSize) * sizeof(char *); // + siDataSize -- needed for '\0' last char char *pcTempData = (char *)malloc(siTotalDataSize); ASSERTE(pcTempData); int *piHeader = (int *)pcTempData; sizeint siDataOffset = siHeaderSize; for (sizeint siIndex = 0; siIndex < siDataSize; ++siIndex) { sizeint siElementLength = vsInputStrings[siIndex].length(); 
piHeader[siIndex * 2] = siDataOffset; piHeader[siIndex * 2 + 1] = siElementLength; strcpy(pcTempData + siDataOffset, vsInputStrings[siIndex].c_str()); siDataOffset += siElementLength + 1; } cudaErrorCheck(cudaMalloc(&d_pData, siTotalDataSize)); cudaErrorCheck(cudaMemcpy(d_pData, pcTempData, siTotalDataSize, cudaMemcpyHostToDevice)); free(pcTempData); } void CreateDeviceResultData(sizeint siStringsCount, char *&d_pData) { sizeint siDataSize = siStringsCount * HASH_SIZE; cudaErrorCheck(cudaMalloc(&d_pData, siDataSize)); } void FreeDeviceData(char *d_pData) { cudaErrorCheck(cudaFree(d_pData)); } void ConvertDeviceResultToVector(char *d_pResults, sizeint siStringsCount, string_v &vsOutResults) { sizeint siHashSize = HASH_SIZE; sizeint siDataSize = siStringsCount * siHashSize; char *h_pData = (char *)malloc(siDataSize); ASSERTE(h_pData); cudaErrorCheck(cudaMemcpy(h_pData, d_pResults, siDataSize, cudaMemcpyDeviceToHost)); string_v vsHashes; char szBuffer[33]; uchar *pchMd5Hash = (uchar *)h_pData; for (sizeint idx = 0; idx < siStringsCount; ++idx) { sprintf(szBuffer, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", pchMd5Hash[0], pchMd5Hash[1], pchMd5Hash[2], pchMd5Hash[3], pchMd5Hash[4], pchMd5Hash[5], pchMd5Hash[6], pchMd5Hash[7], pchMd5Hash[8], pchMd5Hash[9], pchMd5Hash[10], pchMd5Hash[11], pchMd5Hash[12], pchMd5Hash[13], pchMd5Hash[14], pchMd5Hash[15]); vsHashes.push_back(szBuffer); pchMd5Hash += siHashSize; } free(h_pData); vsOutResults.swap(vsHashes); } __device__ void MD5Init(CMD5State *pmdMD5State) { pmdMD5State->uiCount[0] = pmdMD5State->uiCount[1] = 0; pmdMD5State->uiABCD[0] = 0x67452301; pmdMD5State->uiABCD[1] = 0xefcdab89; pmdMD5State->uiABCD[2] = 0x98badcfe; pmdMD5State->uiABCD[3] = 0x10325476; } __device__ void MD5Process(CMD5State *pmdMD5State, const uchar *pucData /*[64]*/) { uint a = pmdMD5State->uiABCD[0], b = pmdMD5State->uiABCD[1]; uint c = pmdMD5State->uiABCD[2], d = pmdMD5State->uiABCD[3]; uint t; #ifdef ARCH_IS_BIG_ENDIAN /* On 
big-endian machines, we must arrange the bytes in the right order. (This also works on machines of unknown byte order.) */ uint puiX[16]; const uchar *pucXP = pucData; for (sizeint idx = 0; idx < 16; ++idx, pucXP += 4) { puiX[idx] = pucXP[0] + (pucXP[1] << 8) + (pucXP[2] << 16) + (pucXP[3] << 24); } #else /* On little-endian machines, we can process properly aligned data without copying it. */ uint auiXBuf[16]; const uint *puiX; if (!((pucData - (const uchar *)0) & 3)) { puiX = (const uint *)pucData; /* data are properly aligned */ } else { memcpy(auiXBuf, pucData, 64); /* not aligned */ puiX = auiXBuf; } #endif #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) /* Round 1. Let [abcd k s i] denote the operation -- a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */ #define F(x, y, z) (((x) & (y)) | (~(x) & (z))) #define SET(a, b, c, d, k, s, Ti) \ t = a + F(b,c,d) + puiX[k] + Ti; \ a = ROTATE_LEFT(t, s) + b /* Do the following 16 operations. */ SET(a, b, c, d, 0, 7, g_auiTArray[0]); SET(d, a, b, c, 1, 12, g_auiTArray[1]); SET(c, d, a, b, 2, 17, g_auiTArray[2]); SET(b, c, d, a, 3, 22, g_auiTArray[3]); SET(a, b, c, d, 4, 7, g_auiTArray[4]); SET(d, a, b, c, 5, 12, g_auiTArray[5]); SET(c, d, a, b, 6, 17, g_auiTArray[6]); SET(b, c, d, a, 7, 22, g_auiTArray[7]); SET(a, b, c, d, 8, 7, g_auiTArray[8]); SET(d, a, b, c, 9, 12, g_auiTArray[9]); SET(c, d, a, b, 10, 17, g_auiTArray[10]); SET(b, c, d, a, 11, 22, g_auiTArray[11]); SET(a, b, c, d, 12, 7, g_auiTArray[12]); SET(d, a, b, c, 13, 12, g_auiTArray[13]); SET(c, d, a, b, 14, 17, g_auiTArray[14]); SET(b, c, d, a, 15, 22, g_auiTArray[15]); #undef SET /* Round 2. Let [abcd k s i] denote the operation -- a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */ #define G(x, y, z) (((x) & (z)) | ((y) & ~(z))) #define SET(a, b, c, d, k, s, Ti)\ t = a + G(b,c,d) + puiX[k] + Ti;\ a = ROTATE_LEFT(t, s) + b /* Do the following 16 operations. 
*/ SET(a, b, c, d, 1, 5, g_auiTArray[16]); SET(d, a, b, c, 6, 9, g_auiTArray[17]); SET(c, d, a, b, 11, 14, g_auiTArray[18]); SET(b, c, d, a, 0, 20, g_auiTArray[19]); SET(a, b, c, d, 5, 5, g_auiTArray[20]); SET(d, a, b, c, 10, 9, g_auiTArray[21]); SET(c, d, a, b, 15, 14, g_auiTArray[22]); SET(b, c, d, a, 4, 20, g_auiTArray[23]); SET(a, b, c, d, 9, 5, g_auiTArray[24]); SET(d, a, b, c, 14, 9, g_auiTArray[25]); SET(c, d, a, b, 3, 14, g_auiTArray[26]); SET(b, c, d, a, 8, 20, g_auiTArray[27]); SET(a, b, c, d, 13, 5, g_auiTArray[28]); SET(d, a, b, c, 2, 9, g_auiTArray[29]); SET(c, d, a, b, 7, 14, g_auiTArray[30]); SET(b, c, d, a, 12, 20, g_auiTArray[31]); #undef SET /* Round 3. Let [abcd k s t] denote the operation -- a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */ #define H(x, y, z) ((x) ^ (y) ^ (z)) #define SET(a, b, c, d, k, s, Ti)\ t = a + H(b,c,d) + puiX[k] + Ti;\ a = ROTATE_LEFT(t, s) + b /* Do the following 16 operations. */ SET(a, b, c, d, 5, 4, g_auiTArray[32]); SET(d, a, b, c, 8, 11, g_auiTArray[33]); SET(c, d, a, b, 11, 16, g_auiTArray[34]); SET(b, c, d, a, 14, 23, g_auiTArray[35]); SET(a, b, c, d, 1, 4, g_auiTArray[36]); SET(d, a, b, c, 4, 11, g_auiTArray[37]); SET(c, d, a, b, 7, 16, g_auiTArray[38]); SET(b, c, d, a, 10, 23, g_auiTArray[39]); SET(a, b, c, d, 13, 4, g_auiTArray[40]); SET(d, a, b, c, 0, 11, g_auiTArray[41]); SET(c, d, a, b, 3, 16, g_auiTArray[42]); SET(b, c, d, a, 6, 23, g_auiTArray[43]); SET(a, b, c, d, 9, 4, g_auiTArray[44]); SET(d, a, b, c, 12, 11, g_auiTArray[45]); SET(c, d, a, b, 15, 16, g_auiTArray[46]); SET(b, c, d, a, 2, 23, g_auiTArray[47]); #undef SET /* Round 4. Let [abcd k s t] denote the operation -- a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */ #define I(x, y, z) ((y) ^ ((x) | ~(z))) #define SET(a, b, c, d, k, s, Ti)\ t = a + I(b,c,d) + puiX[k] + Ti;\ a = ROTATE_LEFT(t, s) + b /* Do the following 16 operations. 
*/ SET(a, b, c, d, 0, 6, g_auiTArray[48]); SET(d, a, b, c, 7, 10, g_auiTArray[49]); SET(c, d, a, b, 14, 15, g_auiTArray[50]); SET(b, c, d, a, 5, 21, g_auiTArray[51]); SET(a, b, c, d, 12, 6, g_auiTArray[52]); SET(d, a, b, c, 3, 10, g_auiTArray[53]); SET(c, d, a, b, 10, 15, g_auiTArray[54]); SET(b, c, d, a, 1, 21, g_auiTArray[55]); SET(a, b, c, d, 8, 6, g_auiTArray[56]); SET(d, a, b, c, 15, 10, g_auiTArray[57]); SET(c, d, a, b, 6, 15, g_auiTArray[58]); SET(b, c, d, a, 13, 21, g_auiTArray[59]); SET(a, b, c, d, 4, 6, g_auiTArray[60]); SET(d, a, b, c, 11, 10, g_auiTArray[61]); SET(c, d, a, b, 2, 15, g_auiTArray[62]); SET(b, c, d, a, 9, 21, g_auiTArray[63]); #undef SET /* Then perform the following additions. (That is increment each of the four registers by the value it had before this block was started.) */ pmdMD5State->uiABCD[0] += a; pmdMD5State->uiABCD[1] += b; pmdMD5State->uiABCD[2] += c; pmdMD5State->uiABCD[3] += d; } __device__ void MD5Append(CMD5State *pmdMD5State, const uchar *aucData, int nBytes) { do { const uchar *pData = aucData; int nLeftBytes = nBytes; int nOffset = (pmdMD5State->uiCount[0] >> 3) & 63; uint uiBits = (uint)(nBytes << 3); if (nBytes <= 0) { break; } /* Update the message length. */ pmdMD5State->uiCount[1] += nBytes >> 29; pmdMD5State->uiCount[0] += uiBits; if (pmdMD5State->uiCount[0] < uiBits) { pmdMD5State->uiCount[1]++; } /* Process an initial partial block. */ if (nOffset) { int nCopy = (nOffset + nBytes > 64 ? 64 - nOffset : nBytes); memcpy(pmdMD5State->szBuffer + nOffset, pData, nCopy); if (nOffset + nCopy < 64) { break; } pData += nCopy; nLeftBytes -= nCopy; MD5Process(pmdMD5State, pmdMD5State->szBuffer); } /* Process full blocks. */ for (; nLeftBytes >= 64; pData += 64, nLeftBytes -= 64) { MD5Process(pmdMD5State, pData); } /* Process a final partial block. 
*/ if (nLeftBytes) { memcpy(pmdMD5State->szBuffer, pData, nLeftBytes); } } while (false); } __device__ void MD5Finish(CMD5State *pmdMD5State, uchar aucDigest[16]) { uchar aucData[8]; /* Save the length before padding. */ for (sizeint i = 0; i < 8; ++i) { aucData[i] = (uchar)(pmdMD5State->uiCount[i >> 2] >> ((i & 3) << 3)); } /* Pad to 56 bytes mod 64. */ MD5Append(pmdMD5State, g_aucPad, ((55 - (pmdMD5State->uiCount[0] >> 3)) & 63) + 1); /* Append the length. */ MD5Append(pmdMD5State, aucData, 8); for (sizeint i = 0; i < 16; ++i) { aucDigest[i] = (uchar)(pmdMD5State->uiABCD[i >> 2] >> ((i & 3) << 3)); } } __device__ void CalculateMD5(const char *szData, char *pcResultData, uint uiLength) { CMD5State msMD5State; MD5Init(&msMD5State); MD5Append(&msMD5State, (const uchar *)szData, uiLength); const int siHashSize = HASH_SIZE; uchar aucDigest[siHashSize]; MD5Finish(&msMD5State, aucDigest); for (int idx = 0; idx < siHashSize; ++idx) { pcResultData[idx] = aucDigest[idx]; } //printf("%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", // aucDigest[0], aucDigest[1], aucDigest[2], aucDigest[3], aucDigest[4], aucDigest[5], aucDigest[6], aucDigest[7], // aucDigest[8], aucDigest[9], aucDigest[10], aucDigest[11], aucDigest[12], aucDigest[13], aucDigest[14], aucDigest[15]); } __global__ void CalculateMd5Kernel(const char *pcData, char *pcResultData, sizeint siDataSize) { int tid = threadIdx.x; if (tid < siDataSize) { int idx = tid << 1; uint uiOffset = ((int *)pcData)[idx]; uint uiLength = ((int *)pcData)[idx + 1]; const char *pszCurrentString = pcData + uiOffset; char *pszResultData = pcResultData + tid * HASH_SIZE; CalculateMD5(pszCurrentString, pszResultData, uiLength); } } void CalculateMd5(const string_v &vsInputStrings, string_v &vsResults) { char *d_pData = NULL, *d_pResults = NULL; sizeint siStringsCount = vsInputStrings.size(); CreateDeviceInputData(vsInputStrings, d_pData); CreateDeviceResultData(siStringsCount, d_pResults); 
ASSERTE(d_pData && d_pResults); CalculateMd5Kernel<<<1, siStringsCount>>>(d_pData, d_pResults, siStringsCount); cudaErrorCheck(cudaGetLastError()); cudaErrorCheck(cudaDeviceSynchronize()); ConvertDeviceResultToVector(d_pResults, siStringsCount, vsResults); FreeDeviceData(d_pData); FreeDeviceData(d_pResults); }
99fbb3e35af630dc995675026d1224f2d547b219.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } bools[idx] = (idata[idx] == 0 ? 0 : 1); } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } if (bools[idx] == 1) { odata[indices[idx]] = idata[idx]; } } } }
99fbb3e35af630dc995675026d1224f2d547b219.cu
#include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } bools[idx] = (idata[idx] == 0 ? 0 : 1); } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } if (bools[idx] == 1) { odata[indices[idx]] = idata[idx]; } } } }
9027d14dc99bdcb8816879913948fe3ac359e617.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define NX 100 // No. of cells in x direction #define NY 100 // No. of cells in y direction #define NZ 100 // No. of cells in z direction #define N (NX*NY*NZ) // N = total number of cells in domain #define L 100 // L = length of domain (m) #define H 100 // H = Height of domain (m) #define W 100 // W = Width of domain (m) #define DX (L/NX) // DX, DY, DZ = grid spacing in x,y,z. #define DY (H/NY) #define DZ (W/NZ) #define DT 0.001 // Time step (seconds) #define R (1.0) // Dimensionless specific gas constant #define GAMA (7.0/5.0) // Ratio of specific heats #define CV (R/(GAMA-1.0)) // Cv #define CP (CV + R) // Cp //#define DEBUG_VALUE float *dens; //density float *temperature; //temperature float *xv; //velocity in x float *yv; //velocity in y float *zv; //velocity in z float *press; //pressure float *d_dens; //density float *d_temperature; //temperature float *d_xv; //velocity in x float *d_yv; //velocity in y float *d_zv; //velocity in z float *d_press; //pressure float *U; float *U_new; float *E; float *F; float *G; float *FF; float *FB; float *FR; float *FL; float *FU; float *FD; float *h_body; float *d_body; int total_cells = 0; // A counter for computed cells __global__ void GPUHeatContactFunction(float *a, float *b, int *body) { }
9027d14dc99bdcb8816879913948fe3ac359e617.cu
#include "includes.h" #define NX 100 // No. of cells in x direction #define NY 100 // No. of cells in y direction #define NZ 100 // No. of cells in z direction #define N (NX*NY*NZ) // N = total number of cells in domain #define L 100 // L = length of domain (m) #define H 100 // H = Height of domain (m) #define W 100 // W = Width of domain (m) #define DX (L/NX) // DX, DY, DZ = grid spacing in x,y,z. #define DY (H/NY) #define DZ (W/NZ) #define DT 0.001 // Time step (seconds) #define R (1.0) // Dimensionless specific gas constant #define GAMA (7.0/5.0) // Ratio of specific heats #define CV (R/(GAMA-1.0)) // Cv #define CP (CV + R) // Cp //#define DEBUG_VALUE float *dens; //density float *temperature; //temperature float *xv; //velocity in x float *yv; //velocity in y float *zv; //velocity in z float *press; //pressure float *d_dens; //density float *d_temperature; //temperature float *d_xv; //velocity in x float *d_yv; //velocity in y float *d_zv; //velocity in z float *d_press; //pressure float *U; float *U_new; float *E; float *F; float *G; float *FF; float *FB; float *FR; float *FL; float *FU; float *FD; float *h_body; float *d_body; int total_cells = 0; // A counter for computed cells __global__ void GPUHeatContactFunction(float *a, float *b, int *body) { }
0b705ff736b91b25d61132dcd8468e93bf52cf01.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/PinnedMemoryAllocator.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/hip/MiscUtils.h> #include <ATen/native/Resize.h> #include <ATen/native/BatchLinearAlgebra.h> #include <ATen/native/hip/BatchLinearAlgebraLib.h> #include <ATen/native/cpu/zmath.h> #include <THH/THH.h> // for USE_MAGMA #ifdef USE_MAGMA #include <magma_types.h> #include <magma_v2.h> const bool use_magma_ = true; #else const bool use_magma_ = false; #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLu( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info); template<class scalar_t> void magmaLuBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLuNoPiv( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaLuNoPivBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n); 
template<class scalar_t> void magmaGetri( magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork, magma_int_t lwork, magma_int_t* info); template<class scalar_t> void magmaGetriBatched( magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholeskySolve( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaCholeskySolveBatched( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholesky( magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaCholeskyBatched( magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template <class scalar_t> void magmaTriangularSolve( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaTriangularSolveBatched( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n); template<class scalar_t> void magmaGeqrf( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2); template<class 
scalar_t> void magmaOrgqr( magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info); template<class scalar_t, class value_t=scalar_t> void magmaSymeig( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info); template<class scalar_t, class value_t=scalar_t> void magmaEig( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda, scalar_t *w, scalar_t *VL, magma_int_t ldvl, scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork, value_t *rwork, magma_int_t *info); template<class scalar_t, class value_t=scalar_t> void magmaSvd( magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A, magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu, scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork, value_t* rwork, magma_int_t* iwork, magma_int_t* info); template<class scalar_t> void magmaLuSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaLuSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<> void magmaSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { 
MagmaStreamSyncGuard guard; magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolve<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgesv_gpu(n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolve<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgesv_gpu(n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolveBatched<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t 
lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_zgesv_batched(n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolveBatched<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_cgesv_batched(n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLu<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLu<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLu<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLu<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void 
magmaLuBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuBatched<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuBatched<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPiv<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPiv<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> 
// Complex specializations of magmaLuNoPiv (no-pivot LU, getrf_nopiv).
void magmaLuNoPiv<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaLuNoPiv<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info); AT_CUDA_CHECK(hipGetLastError()); }
// magmaLuNoPivBatched: batched no-pivot LU; per-matrix status in info_array.
template<> void magmaLuNoPivBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaLuNoPivBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaLuNoPivBatched<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaLuNoPivBatched<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); 
AT_CUDA_CHECK(hipGetLastError()); }
// magmaGetriOptimalBlocksize: MAGMA's recommended blocksize for getri, used by
// callers to size the dwork workspace (lwork = n * nb).
template<> inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) { return magma_get_dgetri_nb(n); }
template<> inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) { return magma_get_sgetri_nb(n); }
template <> inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>( magma_int_t n) { return magma_get_zgetri_nb(n); }
template <> inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>( magma_int_t n) { return magma_get_cgetri_nb(n); }
// magmaGetri: matrix inverse from an existing LU factorization (getri); dA holds
// the LU factors on entry and the inverse on exit.
template<> void magmaGetri<double>( magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaGetri<float>( magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); AT_CUDA_CHECK(hipGetLastError()); }
template <> void magmaGetri<c10::complex<double>>( magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<double>* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetri_gpu( n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dwork), lwork, info); AT_CUDA_CHECK(hipGetLastError()); }
template <> void magmaGetri<c10::complex<float>>( magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<float>* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetri_gpu( n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dwork), lwork, info); AT_CUDA_CHECK(hipGetLastError()); }
// magmaGetriBatched: batched out-of-place inverse (result in dinvA_array).
template<> void magmaGetriBatched<double>( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, double** 
dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaGetriBatched<float>( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template <> void magmaGetriBatched<c10::complex<double>>( magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, c10::complex<double>** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetri_outofplace_batched( n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, reinterpret_cast<magmaDoubleComplex**>(dinvA_array), lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template <> void magmaGetriBatched<c10::complex<float>>( magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, c10::complex<float>** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetri_outofplace_batched( n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, reinterpret_cast<magmaFloatComplex**>(dinvA_array), lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
// magmaCholeskySolve: triangular solve from an existing Cholesky factor (potrs);
// dB is overwritten with the solution.
template<> void magmaCholeskySolve<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dpotrs_gpu(uplo, n, nrhs, dA, 
ldda, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholeskySolve<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholeskySolve<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zpotrs_gpu(uplo, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholeskySolve<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cpotrs_gpu(uplo, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); }
// magmaCholeskySolveBatched: note that, unlike the single-matrix variant, the
// batched potrs returns its status, which is assigned to the `info` reference.
template<> void magmaCholeskySolveBatched<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholeskySolveBatched<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> 
// Complex specializations of magmaCholeskySolveBatched (batched potrs).
void magmaCholeskySolveBatched<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_zpotrs_batched(uplo, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholeskySolveBatched<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_cpotrs_batched(uplo, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
// magmaCholesky: in-place Cholesky factorization (potrf) of the uplo triangle.
template<> void magmaCholesky<double>( magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dpotrf_gpu(uplo, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholesky<float>( magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_spotrf_gpu(uplo, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholesky<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholesky<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cpotrf_gpu(uplo, n, 
reinterpret_cast<magmaFloatComplex*>(dA), ldda, info); AT_CUDA_CHECK(hipGetLastError()); }
// magmaCholeskyBatched: batched potrf; per-matrix status in info_array.
template<> void magmaCholeskyBatched<double>( magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholeskyBatched<float>( magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholeskyBatched<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaCholeskyBatched<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
// magmaTriangularSolve: left-sided triangular solve (trsm) with alpha = 1;
// dB is overwritten with the solution.
template <> void magmaTriangularSolve<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) { magma_dtrsm( MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template <> void magmaTriangularSolve<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t 
diag, magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) { magma_strsm( MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
// Complex trsm uses an explicit alpha = 1 + 0i.
template <> void magmaTriangularSolve<c10::complex<double>>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) { magmaDoubleComplex alpha({1, 0}); magma_ztrsm( MagmaLeft, uplo, trans, diag, m, n, alpha, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template <> void magmaTriangularSolve<c10::complex<float>>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) { magmaFloatComplex alpha({1, 0}); magma_ctrsm( MagmaLeft, uplo, trans, diag, m, n, alpha, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(dB), lddb, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
// magmaTriangularSolveBatched: batched left-sided trsm (magmablas variant).
template<> void magmaTriangularSolveBatched<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaTriangularSolveBatched<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& 
magma_queue) { magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaTriangularSolveBatched<c10::complex<double>>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmaDoubleComplex alpha({1, 0}); magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaTriangularSolveBatched<c10::complex<float>>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmaFloatComplex alpha({1, 0}); magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); }
// magmaGeqrfOptimalBlocksize: MAGMA's recommended blocksize for geqrf, used to
// size the dT workspace of the QR factorization.
template<> inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) { return magma_get_dgeqrf_nb(m, n); }
template<> inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) { return magma_get_sgeqrf_nb(m, n); }
template <> inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>( magma_int_t m, magma_int_t n) { return magma_get_zgeqrf_nb(m, n); }
template <> inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>( magma_int_t m, magma_int_t n) { return magma_get_cgeqrf_nb(m, n); }
// magmaGeqrf: QR factorization; is_v2 selects the geqrf2 variant that does not
// produce the dT workspace.
template<> void magmaGeqrf<double>( magma_int_t m, 
magma_int_t n, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard;
// geqrf_gpu fills dT (needed later by orgqr); geqrf2_gpu skips it.
if (!is_v2) { magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info); } AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaGeqrf<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info); } AT_CUDA_CHECK(hipGetLastError()); }
template <> void magmaGeqrf<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* tau, c10::complex<double>* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_zgeqrf_gpu( m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(tau), reinterpret_cast<magmaDoubleComplex*>(dT), info); } else { magma_zgeqrf2_gpu( m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(tau), info); } AT_CUDA_CHECK(hipGetLastError()); }
template <> void magmaGeqrf<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* tau, c10::complex<float>* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_cgeqrf_gpu( m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(tau), reinterpret_cast<magmaFloatComplex*>(dT), info); } else { magma_cgeqrf2_gpu( m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(tau), info); } AT_CUDA_CHECK(hipGetLastError()); }
// magmaOrgqr: form the explicit Q from geqrf output (orgqr/ungqr).
template<> void magmaOrgqr<double>( magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; 
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaOrgqr<float>( magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); AT_CUDA_CHECK(hipGetLastError()); }
// Complex variants dispatch to ungqr (the unitary analogue of orgqr).
template <> void magmaOrgqr<c10::complex<double>>( magma_int_t m, magma_int_t n, magma_int_t k, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* tau, c10::complex<double>* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zungqr_gpu( m, n, k, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(tau), reinterpret_cast<magmaDoubleComplex*>(dT), nb, info); AT_CUDA_CHECK(hipGetLastError()); }
template <> void magmaOrgqr<c10::complex<float>>( magma_int_t m, magma_int_t n, magma_int_t k, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* tau, c10::complex<float>* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cungqr_gpu( m, n, k, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(tau), reinterpret_cast<magmaFloatComplex*>(dT), nb, info); AT_CUDA_CHECK(hipGetLastError()); }
// magmaSymeig: symmetric/Hermitian eigendecomposition (syevd/heevd). The real
// specializations ignore rwork/lrwork, which only the complex drivers need.
template<> void magmaSymeig<double>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard; magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaSymeig<float>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* 
rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard; magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); AT_CUDA_CHECK(hipGetLastError()); }
// Complex Hermitian eigendecomposition (heevd); eigenvalues land in the real
// array w, and rwork/lrwork are actually used here.
template<> void magmaSymeig<c10::complex<double>, double>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zheevd_gpu( jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA), ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaSymeig<c10::complex<float>, float>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cheevd_gpu( jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA), ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info); AT_CUDA_CHECK(hipGetLastError()); }
// magmaEig: general (non-symmetric) eigendecomposition (geev), CPU-interface.
template<> void magmaEig<double>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, double *A, magma_int_t lda, double *w, double *VL, magma_int_t ldvl, double *VR, magma_int_t ldvr, double *work, magma_int_t lwork, double *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard;
// magma [sd]geev wants to separate output arrays: wr and wi for the real
// and imaginary parts
double *wr = w; double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaEig<float>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, float *A, magma_int_t lda, float *w, float *VL, magma_int_t ldvl, float *VR, magma_int_t ldvr, float *work, magma_int_t lwork, float *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard;
// Real geev splits eigenvalues into wr/wi halves of the caller's w buffer.
float *wr = w; float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info); AT_CUDA_CHECK(hipGetLastError()); }
// Complex geev returns eigenvalues directly as complex w; rwork is used.
template<> void magmaEig<c10::complex<double>, double>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, c10::complex<double> *A, magma_int_t lda, c10::complex<double> *w, c10::complex<double> *VL, magma_int_t ldvl, c10::complex<double> *VR, magma_int_t ldvr, c10::complex<double> *work, magma_int_t lwork, double *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard; magma_zgeev(jobvl, jobvr, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, reinterpret_cast<magmaDoubleComplex*>(w), reinterpret_cast<magmaDoubleComplex*>(VL), ldvl, reinterpret_cast<magmaDoubleComplex*>(VR), ldvr, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaEig<c10::complex<float>, float>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, c10::complex<float> *A, magma_int_t lda, c10::complex<float> *w, c10::complex<float> *VL, magma_int_t ldvl, c10::complex<float> *VR, magma_int_t ldvr, c10::complex<float> *work, magma_int_t lwork, float *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard; magma_cgeev(jobvl, jobvr, n, reinterpret_cast<magmaFloatComplex*>(A), lda, reinterpret_cast<magmaFloatComplex*>(w), reinterpret_cast<magmaFloatComplex*>(VL), ldvl, reinterpret_cast<magmaFloatComplex*>(VR), ldvr, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, info); AT_CUDA_CHECK(hipGetLastError()); }
// magmaSvd: singular value decomposition (gesdd, divide-and-conquer).
template<> void magmaSvd<double>( magma_vec_t jobz, 
magma_int_t m, magma_int_t n, double* A, magma_int_t lda, double* s, double* U, magma_int_t ldu, double* VT, magma_int_t ldvt, double* work, magma_int_t lwork, double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard; magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaSvd<float>( magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A, magma_int_t lda, float* s, float* U, magma_int_t ldu, float* VT, magma_int_t ldvt, float* work, magma_int_t lwork, float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard; magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); AT_CUDA_CHECK(hipGetLastError()); }
// Complex gesdd needs rwork; singular values s stay real.
template<> void magmaSvd<c10::complex<float>, float>( magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A, magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu, c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork, float *rwork, magma_int_t* iwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s, reinterpret_cast<magmaFloatComplex*>(U), ldu, reinterpret_cast<magmaFloatComplex*>(VT), ldvt, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, iwork, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaSvd<c10::complex<double>, double>( magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A, magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu, c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork, double *rwork, magma_int_t* iwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s, reinterpret_cast<magmaDoubleComplex*>(U), ldu, reinterpret_cast<magmaDoubleComplex*>(VT), ldvt, 
reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, iwork, info); AT_CUDA_CHECK(hipGetLastError()); }
// magmaLuSolve: solve A X = B from an existing LU factorization (getrs),
// always with MagmaNoTrans; dB is overwritten with the solution.
template<> void magmaLuSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaLuSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaLuSolve<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); }
template<> void magmaLuSolve<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); }
// magmaLuSolveBatched: batched getrs; the batched driver returns its status,
// which is assigned to the `info` reference.
template<> void magmaLuSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } 
// Batched LU solve — float.
template<>
void magmaLuSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    float** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}

// Batched LU solve — complex<double>.
template<>
void magmaLuSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs,
                              reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
                              reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb,
                              batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}

// Batched LU solve — complex<float>.
template<>
void magmaLuSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs,
                              reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
                              reinterpret_cast<magmaFloatComplex**>(dB_array), lddb,
                              batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
#endif

// Allocates 'size' elements of 'type' in pinned host memory and points 'name'
// at it. The owning Storage is kept alive in a local 'storage_<name>' so the
// buffer lives until the end of the enclosing scope.
#define ALLOCATE_ARRAY(name, type, size) \
  auto storage_##name = pin_memory<type>(size); \
  name = static_cast<type*>(storage_##name.data());

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Solves A*X = b in-place (solution overwrites b) via MAGMA's LU-based gesv.
// 'b' and 'A' must be batched column-major clones; error codes land in 'infos'.
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos) {
#ifndef USE_MAGMA
  AT_ERROR("solve: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  // lda must be >= 1 even for empty matrices
  magma_int_t lda = ::max(magma_int_t{1}, n);
  if (b.dim() == 2) {
    auto ipiv = at::empty({n}, at::kInt);
    infos = infos.to(at::kCPU); // magmaSolve requires infos tensor to live on CPU
    magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
                         b_data, lda, infos.data_ptr<magma_int_t>());
  } else {
    auto infos_data = infos.data_ptr<magma_int_t>();
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    magma_int_t* ipiv_data;
    magma_int_t** ipiv_array;
    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
    ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays (one pointer per matrix in the batch)
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
      ipiv_array[i] = &ipiv_data[i * n];
    }

    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
      magma_int_t* info_array_cur = &infos_data[mini_idx];

      magmaSolveBatched<scalar_t>(
          n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
          info_array_cur, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaSolveBatched<scalar_t>(
          n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
          &infos_data[mini_idx], batch_size % batch_limit, magma_queue);
    }
  }
#endif
}

// torch.solve entry point: clones inputs into column-major form, runs
// apply_solve, then surfaces MAGMA error codes.
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  auto infos = at::empty({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "solve_cuda");
  } else {
    singleCheckErrors(infos.item().toInt(), "solve_cuda");
  }
  return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}

// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
  // 'result' and 'input' should be in column major order (it should be checked before calling this function)
  // the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
  // 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
  // 'input' should contain data of original 'input' tensor (left-hand-side of the linear system)
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cpu", [&]{
    apply_solve<scalar_t>(result, input, infos);
  });
  return result;
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/*
Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
  AT_ERROR("inverse: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data_ptr<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);

  auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
  auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();

  magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
  // MAGMA does not work with batch_size == 0, let's return early in this case
  if (batch_size == 0) {
    return;
  }

  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);

  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** self_array;
  scalar_t** self_inv_array;

  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
  ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
  ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);

  // Set up the created arrays (per-matrix pointers into the batch storage)
  for (int64_t i = 0; i < batch_size; i++) {
    self_array[i] = &self_data[i * self_mat_stride];
    self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }

  MAGMAQueue magma_queue(self.get_device());
  // LU-factorize all matrices in one call; getri below consumes the factors.
  magmaLuBatched<scalar_t>(
    n, n, self_array, lda, ipiv_array, infos_lu_data,
    batch_size, magma_queue);

  constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 possible
  // The number of "mini"-batches are floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
  int64_t mini_batches = batch_size / batch_limit, mini_idx;
  for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
    scalar_t** self_array_cur = &self_array[mini_idx];
    scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
    magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
    magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];

    magmaGetriBatched<scalar_t>(
      n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
      lda, info_array_cur_getri, batch_limit, magma_queue);
  }

  // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
  // which concisely is equal to batch_size % batch_limit
  if (batch_size % batch_limit != 0) {
    magmaGetriBatched<scalar_t>(
      n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
      lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
  }
#endif
}

// In-place inverse of a single matrix via LU factorization + getri.
// 'infos_lu'/'infos_getri' receive the respective MAGMA status codes.
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
  AT_ERROR("inverse: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);
  magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);

  // magmaLu and magmaGetri requires infos tensor to live on CPU
  infos_lu = infos_lu.to(at::kCPU);
  infos_getri = infos_getri.to(at::kCPU);

  Tensor ipiv = at::empty({lda}, at::kInt);
  Tensor dwork = at::empty({lwork}, self.options());
  magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
  magmaGetri<scalar_t>(
    n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}

// MAGMA path for torch.inverse: batched kernels for dim > 2, single-matrix
// hybrid routine otherwise. Error codes are checked before returning.
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
  auto self_inv_working_copy = cloneBatchedColumnMajor(self);
  if (self.dim() > 2) {
    auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
    auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
    auto self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_batched_inverse<scalar_t>(
        self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
    });
    batchCheckErrors(infos_lu, "inverse_cuda");
    batchCheckErrors(infos_getri, "inverse_cuda");
  } else {
    // magmaLu and magmaGetri requires infos tensor to live on CPU
    auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
    });
    singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
    singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
  }
  return self_inv_working_copy;
}

// Backend selector for torch.inverse: prefers cuSOLVER/cuBLAS for single
// matrices or tiny batches (or when MAGMA is disabled), MAGMA otherwise.
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
  if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
    return _inverse_helper_cuda_lib(self);    // cusolver or cublas
  } else {
    return _inverse_helper_cuda_legacy(self); // magma-cuda
  }
#else
  return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}

// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
  // assuming result is in column major order and contains the matrices to invert
  if (result.dim() > 2) {
    auto input_working_copy = cloneBatchedColumnMajor(result);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
      apply_batched_inverse<scalar_t>(
        input_working_copy, result, infos_lu, infos_getri);
    });
  } else {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
      apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
    });
  }
  return result;
}

// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
  // This function calculates the inverse matrix in-place
  // result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
  if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
    return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri);    // cusolver or cublas
  } else {
    return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
  }
#else
  return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
  // NOTE(review): unreachable — every branch above returns; kept to satisfy
  // compilers that cannot see through the preprocessor conditionals.
  return result;
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Solves A*X = b (A symmetric/Hermitian positive-definite, pre-factorized by
// Cholesky) in-place in 'b'. A single shared 'info' is used: the batched MAGMA
// potrs routine only reports argument errors, not per-matrix failures.
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
  AT_ERROR("cholesky_solve: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;

  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");

  int info_tmp = 0;
  if (b.dim() == 2) {
    magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
                                 b_data, lda, &info_tmp);
    info = info_tmp;
  } else {
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }

    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];

      magmaCholeskySolveBatched<scalar_t>(
          uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
          info_tmp, batch_limit, magma_queue);

      // stop on the first reported argument error
      if (info_tmp != 0) {
        break;
      }
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0 && info_tmp == 0) {
      magmaCholeskySolveBatched<scalar_t>(
          uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
          info_tmp, batch_size % batch_limit, magma_queue);
    }

    info = info_tmp;
  }
#endif
}

// (return type of _cholesky_solve_helper_cuda; the declaration continues below)
Tensor
_cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  int64_t info = 0;
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
  });
  // a non-zero info here can only be an illegal-argument report from MAGMA
  TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
  return self_working_copy;
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// In-place Cholesky factorization (potrf); one status per matrix in 'infos'.
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
  AT_ERROR("cholesky: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;

  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  auto lda = std::max<magma_int_t>(1, n);

  if (self.dim() == 2) {
    magma_int_t info = 0;
    magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
    infos[0] = info;
  } else {
    auto self_mat_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    magma_int_t* info_array;
    scalar_t** self_array;

    ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_mat_stride];
    }

    MAGMAQueue magma_queue(self.get_device());

    int64_t batch_limit = self.is_complex() ? 65535 : 262140;
    // Compute as many batches of 262140 possible
    // 262140 is the size of the largest batch of matrices that can be run with
    // violating maximum kernel configuration
    // For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)

    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** self_array_cur = &self_array[mini_idx];
      magma_int_t* info_array_cur = &info_array[mini_idx];

      magmaCholeskyBatched<scalar_t>(
        uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaCholeskyBatched<scalar_t>(
        uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
    }

    // copy per-matrix status codes out of the pinned buffer
    for (int64_t i = 0; i < batch_size; i++) {
      infos[i] = info_array[i];
    }
  }
#endif
}

Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
  std::vector<int64_t> infos(batchCount(self), 0);
  Tensor result;
  if (self.dim() > 2) {
    // MAGMA's batched cholesky operator has an off-by-one error causing IMA
    // (see https://github.com/pytorch/pytorch/issues/42666). This code is based
    // on the #cloneBatchedColumnMajor function however it pads the input with
    // one extra element utilizing the fact that the resize_as_ method preserves
    // the storage even if it's larger than the new sizes. This way if MAGMA
    // reads off bounds it will still be valid user memory.
    const Tensor input = upper ? self : self.transpose(-1, -2);
    result = at::empty(input.numel() + 1, input.options());
    result.resize_as_(input).copy_(input).transpose_(-1, -2);
  } else {
    result = cloneBatchedColumnMajor(upper ? self.transpose(-1, -2) : self);
  }
  // 'upper' is hard-coded to false here: the input was transposed above so the
  // factorization always runs in lower mode; the transpose on return restores
  // the caller-requested triangle.
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      self.scalar_type(), "cholesky_cuda", [&] {
        apply_cholesky<scalar_t>(result, false, infos);
      });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "cholesky_cuda");
  } else {
    singleCheckErrors(infos[0], "cholesky_cuda");
  }
  return upper ? result.transpose_(-1, -2) : result;
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite matrix n-by-n matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
  TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
  // magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
  // it transfers data several times between GPU and CPU and calls lapack routine on CPU
  // using magmaCholeskySolveBatched is a lot faster
  // note that magmaCholeskySolve is also slow

  // 'input' is modified in-place we need to clone it and replace with a diagonal matrix
  // for apply_cholesky_solve
  auto input_working_copy = cloneBatchedColumnMajor(input);

  // 'input' tensor has to be a batch of diagonal matrix (identity RHS -> the
  // solve produces the inverse directly)
  input.fill_(0);
  input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);

  Tensor result_u, input_u;
  if (input.dim() == 2) {
    // unsqueezing here so that the batched version is used
    result_u = input.unsqueeze(0);
    input_u = input_working_copy.unsqueeze(0);
  } else {
    result_u = input;
    input_u = input_working_copy;
  }

  // magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
  // it returns a single 'magma_int_t'
  // if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
  int64_t info_tmp = 0;
  apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
  infos.fill_(info_tmp);
#endif
}

// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
  // This function calculates the inverse matrix in-place
  // result should be in column major order and contain matrices to invert
  // the content of result is overwritten by 'apply_cholesky_inverse'
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
    apply_cholesky_inverse<scalar_t>(result, infos, upper);
  });
  return result;
}

REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// In-place LU factorization (getrf), with or without partial pivoting.
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
  AT_ERROR("lu: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t m = magma_int_cast(self.size(-2), "m");
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  magma_int_t k = ::min(m, n);

  if (self.dim() == 2) {
    // If `pivots` is defined, then we have to compute them.
    // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
    // the partially-pivoted LU decomposition with / without pivots.
    // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
    // The data is later copied back to the appropriate output tensor.
    Tensor info_tmp = at::zeros({}, at::kInt);
    if (get_pivots) {
      Tensor piv_tmp = at::empty({k}, at::kInt);
      magmaLu<scalar_t>(
        m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
      pivots.copy_(piv_tmp);
    } else {
      magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
    }
    infos.copy_(info_tmp);
  } else {
    auto self_matrix_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    scalar_t** self_array;
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_matrix_stride];
    }

    MAGMAQueue magma_queue(self.get_device());

    // Same comment as in the case of single matrix above.
    if (get_pivots) {
      auto pivots_data = pivots.data_ptr<magma_int_t>();
      auto pivots_matrix_stride = pivots.size(-1);
      magma_int_t** pivots_array;
      ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
      for (int64_t i = 0; i < batch_size; i++) {
        pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
      }
      magmaLuBatched<scalar_t>(
        m, n, self_array, m, pivots_array,
        infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
    } else {
      magmaLuNoPivBatched<scalar_t>(
        m, n, self_array, m, infos.data_ptr<magma_int_t>(),
        batch_size, magma_queue);
    }
  }
#endif
}

std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
  TORCH_CHECK(self.dim() >= 2,
           "expected tensor with 2 or more dimensions, got size: ", self.sizes(),
           " instead");
  auto m = self.size(-2);
  auto n = self.size(-1);
  auto k = ::min(m, n);
  auto req_size = self.sizes().vec();
  req_size.pop_back();
  req_size.back() = k;
  // default pivots are the identity permutation 1..k (used when pivot=false)
  Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
  req_size.pop_back();
  auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));

  Tensor self_working_copy;
  if (self.numel() == 0) {
    self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  } else {
    self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
      apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
    });
  }
  if (check_errors) {
    if (self.dim() == 2) {
      singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
    } else {
      batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
    }
  }
  return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Solves op(A)*X = b for triangular A, in-place in 'b' (trsm). No info output:
// the underlying BLAS-style routines report nothing per matrix.
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
  AT_ERROR("triangular_solve: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
  magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;

  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  // magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
  // magmaTriangularSolve is calling cuBLAS and it prints
  // ** On entry to DTRSM  parameter number 9 had an illegal value
  // so let's use proper lda parameter here
  magma_int_t lda = std::max<magma_int_t>(1, n);
  magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

  MAGMAQueue magma_queue(b.get_device());

  // batch_size == 1 implies that:
  // 1. the RHS and LHS tensors have 2 dimensions, or
  // 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
  if (batch_size == 1) {
    // TODO: this magma call is just a wrapper around cublas<t>trsm, consider using cublas directly here
    magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, lda, b_data, lda, magma_queue);
  } else {
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);

    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }

    // NOTE(review): this shadows the 'magma_queue' declared at function scope
    // above; both are constructed from the same device, so behavior is
    // unaffected, but the outer one could likely be reused.
    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];

      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, A_array_cur,
          lda, b_array_cur, lda, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, &A_array[mini_idx],
          lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
    }
  }
#endif
}

std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
                                                         bool upper, bool transpose, bool unitriangular) {
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
    apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
  });
  return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// QR decomposition via geqrf (+ orgqr when Q is requested). R is computed in-place
// in 'R' and Q in 'Q'; one status per matrix goes into 'infos'.
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1,
                     int64_t n_columns, bool compute_q, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
  AT_ERROR("qr: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
  magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");

  auto r_data = R.data_ptr<scalar_t>();
  auto r_matrix_stride = matrixStride(R);
  magma_int_t k = m < n ? m : n;
  magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
  int64_t batch_size = batchCount(R);

  // magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
  // The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
  Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
  Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
  scalar_t* tau_data = tau.data_ptr<scalar_t>();
  scalar_t* work_data = work.data_ptr<scalar_t>();

  // This phase computes R (the raw version)
  // This uses MAGMA's ?geqrf2_gpu function
  magma_int_t info = 0;
  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
    magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
  if (!compute_q) {
    // this is for mode='r'
    return;
  }

  // This phase computes Q (the raw version)
  // We require to perform ?geqrf_gpu again due to this bug in MAGMA:
  // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
  // - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
  // Refer to the below link for more details:
  // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
  auto q_data = Q.data_ptr<scalar_t>();
  auto q_matrix_stride = matrixStride(Q);
  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
    magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
    infos[i] = info;
    if (info != 0) {
      return;
    }
    magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
#endif
}

std::tuple<Tensor,Tensor> _linalg_qr_helper_cuda(const Tensor& self, std::string mode) {
  bool compute_q, reduced;
  std::tie(compute_q, reduced) = _parse_qr_mode(mode);
  std::vector<int64_t> infos(batchCount(self), 0);

  // Setup input geometry and inputs for apply_qr
  std::vector<int64_t> q_sizes, q_strides;
  int64_t n_columns_q;
  std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
  Tensor q_working_copy, r_working_copy;

  // If there are no elements, then we simply return a pair of tensors of required dimensions
  if (self.numel() == 0) {
    int64_t n = self.size(-1);
    r_working_copy = at::empty({n_columns_q, n}, self.options());
    if (compute_q) {
        int64_t n_rows_q = q_sizes[self.dim() - 2];
        q_working_copy = at::eye(n_rows_q, n_columns_q, self.options());
    } else {
      q_working_copy = at::empty({0}, self.options());
    }
    return std::make_tuple(q_working_copy, r_working_copy);
  }

  if (compute_q) {
    q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
    q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
  } else {
    q_working_copy = at::empty({0}, self.options());
  }
  r_working_copy = cloneBatchedColumnMajor(self);

  int64_t m = q_sizes[self.dim() - 2];
  int64_t n = r_working_copy.size(-1);

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
    apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "qr_cuda");
  } else {
    singleCheckErrors(infos[0], "qr_cuda");
  }

  if (compute_q) {
    q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
  }
  r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
  return std::make_tuple(q_working_copy, r_working_copy);
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Symmetric/Hermitian eigendecomposition (syevd/heevd). Eigenvalues go into
// 'eigvals'; if 'eigenvectors' is set, 'self' is overwritten with them.
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
  AT_ERROR("symeig: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto eigvals_data = eigvals.data_ptr<value_t>();
  auto self_matrix_stride = matrixStride(self);
  auto eigvals_stride = eigvals.size(-1);
  int64_t batch_size = batchCount(self);
  magma_int_t n = magma_int_cast(self.size(-1), "n");

  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;

  scalar_t* wA;
  ALLOCATE_ARRAY(wA, scalar_t, n * n);

  magma_int_t info;
  // Run once, first to get the optimum work sizes.
  // Since we deal with batches of matrices with the same dimensions, doing this outside
  // the loop saves (batch_size - 1) workspace queries which would provide the same result
  // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
  magma_int_t lwork = -1;
  scalar_t wkopt;
  magma_int_t liwork = -1;
  magma_int_t iwkopt;
  magma_int_t lrwork = -1;
  value_t rwkopt;
  magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);

  scalar_t* work;
  magma_int_t* iwork;
  lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
  liwork = magma_int_cast(iwkopt, "iwork_size");
  ALLOCATE_ARRAY(work, scalar_t, lwork);
  ALLOCATE_ARRAY(iwork, magma_int_t, liwork);

  // rwork is only required for complex inputs
  value_t* rwork = nullptr;
  c10::Storage storage_rwork;
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    lrwork = magma_int_cast(rwkopt, "rwork_size");
    storage_rwork = pin_memory<value_t>(lrwork);
    rwork = static_cast<value_t*>(storage_rwork.data());
  }

  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
    value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
    magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
                                   wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
#endif
}

std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
  std::vector<int64_t> infos(batchCount(self), 0);

  auto self_sizes = self.sizes().vec();
  self_sizes.pop_back();
  ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));

  // magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
  // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
  // The data is later moved to the appropriate device.
  // In the case where self.numel() == 0, we just return an empty tensor of
  // dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
  auto eigvals_working_copy = self.numel() == 0
                              ? at::empty(self_sizes, self.options().dtype(dtype))
                              : at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));

  if (self.numel() == 0) {
    return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
  }

  auto self_working_copy = cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
    apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
  });

  if (self.dim() > 2) {
    batchCheckErrors(infos, "symeig_cuda");
  } else {
    singleCheckErrors(infos[0], "symeig_cuda");
  }
  if (eigenvectors) {
    return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
  } else {
    return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
  }
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.

template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
                      int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
                   "Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
  TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  magma_vec_t jobvr = eigenvectors ?
MagmaVec : MagmaNoVec; magma_int_t n = magma_int_cast(self.size(-1), "n"); auto self_data = self.data_ptr<scalar_t>(); auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>(); scalar_t *wr = out_eigvals_data; scalar_t *vr_data = NULL; magma_int_t ldvr = 1; if (jobvr == MagmaVec) { vr_data = out_eigvecs.data_ptr<scalar_t>(); ldvr = n; } value_t *rwork_data = nullptr; if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { ALLOCATE_ARRAY(rwork_data, value_t, n*2); } if (n > 0) { // call magmaEig once to get the optimal size of work_data scalar_t wkopt; magma_int_t info; magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info); magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt)); // call it a 2nd time to to the actual work scalar_t *work_data = nullptr; ALLOCATE_ARRAY(work_data, scalar_t, lwork); magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info); *info_ptr = info; } #endif } /* * Internal helper; like eig_cuda but: * 1. assume that self is a square matrix of side "n" * 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory * by the caller */ std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) { int64_t n = self.size(-1); // copy self to pinned CPU memory auto self_working_copy = at::empty_strided( {n, n}, // square matrix {1, n}, // column-ordered, as magmaEig expects at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true)); self_working_copy.copy_(self); // tensors holding the results. 
We use empty_strided to make them column-ordered auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor out_eigvals; if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { out_eigvals = at::empty({n}, options); } else { out_eigvals = at::empty_strided({n, 2}, {1, n}, options); } auto out_eigvecs = eigenvectors ? at::empty_strided({n, n}, {1, n}, options) : Tensor(); int64_t info; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{ apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info); }); singleCheckErrors(info, "eig_cuda"); return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs); } REGISTER_DISPATCH(eig_stub, &eig_kernel_impl); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ syevd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // This function computes eigenvalues 'w' and eigenvectors 'v' of the tensor 'self' // compute_eigenvectors controls whether eigenvectors should be computed // uplo controls the portion of input matrix to consider in computations, allowed values are "u", "U", "l", "L" // '_symeig_helper_cuda' prepares correct input for 'apply_symeig' and checks for possible errors using 'infos' // See also CPU implementation in aten/src/ATen/native/BatchLinearAlgebra.cpp std::tuple<Tensor, Tensor> _syevd_helper_cuda(const Tensor& self, bool compute_eigenvectors, std::string uplo_str) { // NumPy allows lowercase input for UPLO argument // It is assumed that uplo_str is either "U" or "L" char uplo = std::toupper(uplo_str[0]); bool upper = uplo == 'U' ? true : false; return _symeig_helper_cuda(self, compute_eigenvectors, upper); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<typename scalar_t> static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT, char jobchar, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("svd: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); auto batchsize = batchCount(self); magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); auto mn = ::min(m, n); c10::Storage storage_rwork; value_t* rwork = nullptr; magma_int_t* iwork; ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn); if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { auto lrwork = computeLRWorkDim(jobchar, m, n); storage_rwork = pin_memory<value_t>(lrwork); rwork = static_cast<value_t*>(storage_rwork.data()); } magma_int_t info = 0; // Run once, first to get the optimum work size. 
// Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info); lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size"); scalar_t* work; ALLOCATE_ARRAY(work, scalar_t, lwork); for (int64_t i = 0; i < batchsize; i++) { scalar_t* self_working_ptr = &self_data[i * self_stride]; value_t* S_working_ptr = &S_data[i * S_stride]; scalar_t* U_working_ptr = &U_data[i * U_stride]; scalar_t* VT_working_ptr = &VT_data[i * VT_stride]; // Compute S, U (optionally), VT (optionally) magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m, S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) { std::vector<int64_t> infos(batchCount(self), 0); int64_t m = self.size(-2), n = self.size(-1); int64_t k = ::min(m, n); char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N'; Tensor U_working_copy, S_working_copy, VT_working_copy; std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv); if (self.numel() > 0) { // The input matrix, U, S and VT have to reside in pinned memory. // Additionally, the input and U have to be in column major format. // _create_U_S_VT takes care of a part of these requirements (for U, S and VT) // For the input matrix, this requirements are being taken care of below. 
// Specify strides auto self_col_major_strides = at::detail::defaultStrides(self.sizes()); self_col_major_strides[self.dim() - 2] = 1; self_col_major_strides[self.dim() - 1] = m; // Create strided tensor in pinned memory auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides, at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true)); self_working_copy.copy_(self); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] { apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "svd_cuda"); } else { singleCheckErrors(infos[0], "svd_cuda"); } U_working_copy = same_stride_to(U_working_copy, self.options()); S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device())); VT_working_copy = same_stride_to(VT_working_copy, self.options()); if (compute_uv) { if (some) { VT_working_copy = VT_working_copy.narrow(-2, 0, k); } } else { VT_working_copy.zero_(); U_working_copy.zero_(); } } else { U_working_copy = same_stride_to(U_working_copy, self.options()).zero_(); S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device())); VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_(); } // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly. // Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V. 
VT_working_copy = VT_working_copy.conj(); VT_working_copy.transpose_(-2, -1); return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy); } std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) { #ifdef USE_CUSOLVER return _svd_helper_cuda_lib(self, some, compute_uv); #else return _svd_helper_cuda_legacy(self, some, compute_uv); #endif } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("lu_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto b_data = b.data_ptr<scalar_t>(); auto lu_data = lu.data_ptr<scalar_t>(); auto n = lu.size(-2); auto nrhs = b.size(-1); int info_tmp = 0; if (b.dim() == 2) { Tensor pivots_tmp = pivots.cpu(); magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp); info = info_tmp; } else { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto b_stride = matrixStride(b); auto lu_stride = matrixStride(lu); auto pivots_stride = pivots.size(-1); magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount"); magma_int_t** pivots_array; scalar_t** lu_array; scalar_t** b_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_stride]; b_array[i] = &b_data[i * b_stride]; lu_array[i] = &lu_data[i * lu_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; 
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** lu_array_cur = &lu_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** pivots_array_cur = &pivots_array[mini_idx]; magmaLuSolveBatched<scalar_t>( n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaLuSolveBatched<scalar_t>( n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data); auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous(); if (self.numel() == 0 || LU_data.numel() == 0) { return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{ apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info); }); TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info); return self_working_copy; } }} // namespace at::native #undef ALLOCATE_ARRAY
0b705ff736b91b25d61132dcd8468e93bf52cf01.cu
#include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/PinnedMemoryAllocator.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/cuda/MiscUtils.h> #include <ATen/native/Resize.h> #include <ATen/native/BatchLinearAlgebra.h> #include <ATen/native/cuda/BatchLinearAlgebraLib.h> #include <ATen/native/cpu/zmath.h> #include <THC/THC.h> // for USE_MAGMA #ifdef USE_MAGMA #include <magma_types.h> #include <magma_v2.h> const bool use_magma_ = true; #else const bool use_magma_ = false; #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLu( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info); template<class scalar_t> void magmaLuBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLuNoPiv( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaLuNoPivBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n); template<class scalar_t> void magmaGetri( magma_int_t 
n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork, magma_int_t lwork, magma_int_t* info); template<class scalar_t> void magmaGetriBatched( magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholeskySolve( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaCholeskySolveBatched( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholesky( magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaCholeskyBatched( magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template <class scalar_t> void magmaTriangularSolve( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaTriangularSolveBatched( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n); template<class scalar_t> void magmaGeqrf( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2); template<class scalar_t> void magmaOrgqr( magma_int_t m, magma_int_t 
n, magma_int_t k, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info); template<class scalar_t, class value_t=scalar_t> void magmaSymeig( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info); template<class scalar_t, class value_t=scalar_t> void magmaEig( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda, scalar_t *w, scalar_t *VL, magma_int_t ldvl, scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork, value_t *rwork, magma_int_t *info); template<class scalar_t, class value_t=scalar_t> void magmaSvd( magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A, magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu, scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork, value_t* rwork, magma_int_t* iwork, magma_int_t* info); template<class scalar_t> void magmaLuSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaLuSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<> void magmaSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgesv_gpu(n, nrhs, 
dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolve<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgesv_gpu(n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolve<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgesv_gpu(n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolveBatched<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t 
batch_count, const MAGMAQueue& magma_queue) { magma_zgesv_batched(n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolveBatched<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_cgesv_batched(n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLu<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLu<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLu<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLu<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuBatched<double>( magma_int_t m, 
magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuBatched<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuBatched<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPiv<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPiv<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void 
magmaLuNoPiv<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPiv<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPivBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPivBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPivBatched<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPivBatched<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); 
  AT_CUDA_CHECK(cudaGetLastError());
}

// Per-dtype queries for MAGMA's recommended GETRI (matrix-inverse) blocksize.
// Callers use the result to size the 'dwork' workspace passed to magmaGetri.
template<> inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
  return magma_get_dgetri_nb(n);
}

template<> inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
  return magma_get_sgetri_nb(n);
}

template <> inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
    magma_int_t n) {
  return magma_get_zgetri_nb(n);
}

template <> inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
    magma_int_t n) {
  return magma_get_cgetri_nb(n);
}

// magmaGetri<T>: single-matrix inverse from an existing LU factorization
// (MAGMA xGETRI). 'dA'/'ipiv' are the LU factors, 'dwork'/'lwork' is the
// workspace, 'info' receives the MAGMA status code.
// NOTE(review): MagmaStreamSyncGuard presumably synchronizes the ATen stream
// around the MAGMA call — confirm against its definition earlier in this file.
template<> void magmaGetri<double>(
    magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv,
    double* dwork, magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaGetri<float>(
    magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv,
    float* dwork, magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

// Complex variants reinterpret c10::complex<T> as MAGMA's complex structs;
// the layouts are bit-compatible (real, imag pairs).
template <> void magmaGetri<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<double>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetri_gpu(
      n,
      reinterpret_cast<magmaDoubleComplex*>(dA),
      ldda,
      ipiv,
      reinterpret_cast<magmaDoubleComplex*>(dwork),
      lwork,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <> void magmaGetri<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<float>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetri_gpu(
      n,
      reinterpret_cast<magmaFloatComplex*>(dA),
      ldda,
      ipiv,
      reinterpret_cast<magmaFloatComplex*>(dwork),
      lwork,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaGetriBatched<T>: out-of-place batched inverse — writes inverses into
// 'dinvA_array' rather than overwriting the LU factors in 'dA_array'.
// One info entry per matrix in 'info_array'.
template<> void magmaGetriBatched<double>(
    magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array,
    double** dinvA_array, magma_int_t lddia, magma_int_t* info_array,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaGetriBatched<float>(
    magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array,
    float** dinvA_array, magma_int_t lddia, magma_int_t* info_array,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template <> void magmaGetriBatched<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<double>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_zgetri_outofplace_batched(
      n,
      reinterpret_cast<magmaDoubleComplex**>(dA_array),
      ldda,
      ipiv_array,
      reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
      lddia,
      info_array,
      batchsize,
      magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template <> void magmaGetriBatched<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<float>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_cgetri_outofplace_batched(
      n,
      reinterpret_cast<magmaFloatComplex**>(dA_array),
      ldda,
      ipiv_array,
      reinterpret_cast<magmaFloatComplex**>(dinvA_array),
      lddia,
      info_array,
      batchsize,
      magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaCholeskySolve<T>: solve A*X = B given a Cholesky factorization of A
// (MAGMA xPOTRS). 'uplo' selects which triangle holds the factor;
// the solution overwrites 'dB'.
template<> void magmaCholeskySolve<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA,
    magma_int_t ldda, double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholeskySolve<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA,
    magma_int_t ldda, float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholeskySolve<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA,
    magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholeskySolve<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA,
    magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaCholeskySolveBatched<T>: batched xPOTRS. Unlike the single variant,
// MAGMA returns one aggregate status, stored into the by-reference 'info'.
template<> void magmaCholeskySolveBatched<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array,
    magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholeskySolveBatched<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array,
    magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholeskySolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array,
    magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb,
    batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholeskySolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array,
    magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb,
    batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaCholesky<T>: in-place Cholesky factorization (MAGMA xPOTRF) of the
// 'uplo' triangle of the n-by-n matrix 'dA'; 'info' receives the status.
template<> void magmaCholesky<double>(
    magma_uplo_t uplo, magma_int_t n, double* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholesky<float>(
    magma_uplo_t uplo, magma_int_t n, float* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholesky<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholesky<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaCholeskyBatched<T>: batched xPOTRF with one status per matrix in
// 'info_array'; runs on the given MAGMA queue (no stream-sync guard here).
template<> void magmaCholeskyBatched<double>(
    magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholeskyBatched<float>(
    magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholeskyBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaCholeskyBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaTriangularSolve<T>: triangular solve op(A)*X = alpha*B from the left
// (MAGMA xTRSM, side fixed to MagmaLeft, alpha fixed to 1). Solution
// overwrites 'dB'.
template <> void magmaTriangularSolve<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    double* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) {
  magma_dtrsm(
      MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb,
      magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template <> void magmaTriangularSolve<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    float* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) {
  magma_strsm(
      MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb,
      magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template <> void magmaTriangularSolve<c10::complex<double>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    c10::complex<double>* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) {
  magmaDoubleComplex alpha({1, 0});  // complex unit scalar for alpha = 1
  magma_ztrsm(
      MagmaLeft, uplo, trans, diag, m, n, alpha,
      reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
      reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
      magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template <> void magmaTriangularSolve<c10::complex<float>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    c10::complex<float>* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) {
  magmaFloatComplex alpha({1, 0});  // complex unit scalar for alpha = 1
  magma_ctrsm(
      MagmaLeft, uplo, trans, diag, m, n, alpha,
      reinterpret_cast<magmaFloatComplex*>(dA), ldda,
      reinterpret_cast<magmaFloatComplex*>(dB), lddb,
      magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaTriangularSolveBatched<T>: batched left-sided xTRSM (magmablas),
// alpha fixed to 1; solutions overwrite the 'dB_array' matrices.
template<> void magmaTriangularSolveBatched<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    double** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaTriangularSolveBatched<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    float** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaTriangularSolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmaDoubleComplex alpha({1, 0});  // complex unit scalar for alpha = 1
  magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize,
    magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaTriangularSolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmaFloatComplex alpha({1, 0});  // complex unit scalar for alpha = 1
  magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize,
    magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

// Per-dtype queries for MAGMA's recommended GEQRF (QR factorization)
// blocksize; used to size workspaces for magmaGeqrf/magmaOrgqr.
template<> inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
  return magma_get_dgeqrf_nb(m, n);
}

template<> inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
  return magma_get_sgeqrf_nb(m, n);
}

template <> inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
    magma_int_t m, magma_int_t n) {
  return magma_get_zgeqrf_nb(m, n);
}

template <> inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
    magma_int_t m, magma_int_t n) {
  return
      magma_get_cgeqrf_nb(m, n);
}

// magmaGeqrf<T>: QR factorization (MAGMA xGEQRF). When 'is_v2' is false the
// blocked variant that also produces the 'dT' matrix is used (needed later by
// magmaOrgqr); when true, the v2 variant without 'dT' is used.
template<> void magmaGeqrf<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaGeqrf<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}

template <> void magmaGeqrf<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_zgeqrf_gpu(
        m,
        n,
        reinterpret_cast<magmaDoubleComplex*>(dA),
        ldda,
        reinterpret_cast<magmaDoubleComplex*>(tau),
        reinterpret_cast<magmaDoubleComplex*>(dT),
        info);
  } else {
    magma_zgeqrf2_gpu(
        m,
        n,
        reinterpret_cast<magmaDoubleComplex*>(dA),
        ldda,
        reinterpret_cast<magmaDoubleComplex*>(tau),
        info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}

template <> void magmaGeqrf<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_cgeqrf_gpu(
        m,
        n,
        reinterpret_cast<magmaFloatComplex*>(dA),
        ldda,
        reinterpret_cast<magmaFloatComplex*>(tau),
        reinterpret_cast<magmaFloatComplex*>(dT),
        info);
  } else {
    magma_cgeqrf2_gpu(
        m,
        n,
        reinterpret_cast<magmaFloatComplex*>(dA),
        ldda,
        reinterpret_cast<magmaFloatComplex*>(tau),
        info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaOrgqr<T>: build the explicit orthogonal/unitary Q from the
// Householder reflectors produced by magmaGeqrf (MAGMA xORGQR / xUNGQR).
// 'nb' must match the blocksize used for the factorization ('dT' layout).
template<> void magmaOrgqr<double>(
    magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaOrgqr<float>(
    magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <> void magmaOrgqr<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zungqr_gpu(
      m,
      n,
      k,
      reinterpret_cast<magmaDoubleComplex*>(dA),
      ldda,
      reinterpret_cast<magmaDoubleComplex*>(tau),
      reinterpret_cast<magmaDoubleComplex*>(dT),
      nb,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <> void magmaOrgqr<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cungqr_gpu(
      m,
      n,
      k,
      reinterpret_cast<magmaFloatComplex*>(dA),
      ldda,
      reinterpret_cast<magmaFloatComplex*>(tau),
      reinterpret_cast<magmaFloatComplex*>(dT),
      nb,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaSymeig<T>: symmetric/Hermitian eigendecomposition (MAGMA xSYEVD /
// xHEEVD). The real specializations ignore 'rwork'/'lrwork' (only the complex
// LAPACK interface takes a real workspace); the casts keep one signature.
template<> void magmaSymeig<double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
    double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork;  // unused
  (void)lrwork;  // unused
  MagmaStreamSyncGuard guard;
  magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaSymeig<float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
    float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork;  // unused
  (void)lrwork;  // unused
  MagmaStreamSyncGuard guard;
  magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaSymeig<c10::complex<double>, double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w,
      reinterpret_cast<magmaDoubleComplex*>(wA), ldwa,
      reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaSymeig<c10::complex<float>, float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w,
      reinterpret_cast<magmaFloatComplex*>(wA), ldwa,
      reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaEig<T>: general (non-symmetric) eigendecomposition (MAGMA xGEEV).
// For real dtypes the caller's 'w' buffer holds 2*n values: the first n are
// reinterpreted as the real parts and the next n as the imaginary parts.
template<> void magmaEig<double>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, double *A, magma_int_t lda,
    double *w, double *VL, magma_int_t ldvl, double *VR, magma_int_t ldvr,
    double *work, magma_int_t lwork, double *rwork, magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  // magma [sd]geev wants to separate output arrays: wr and wi for the real
  // and imaginary parts
  double *wr = w;
  double *wi = w + n;
  (void)rwork;  // unused
  magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaEig<float>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, float *A, magma_int_t lda,
    float *w, float *VL, magma_int_t ldvl, float *VR, magma_int_t ldvr,
    float *work, magma_int_t lwork, float *rwork, magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  float *wr = w;
  float *wi = w + n;
  (void)rwork;  // unused
  magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaEig<c10::complex<double>, double>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    c10::complex<double> *A, magma_int_t lda,
    c10::complex<double> *w,
    c10::complex<double> *VL, magma_int_t ldvl,
    c10::complex<double> *VR, magma_int_t ldvr,
    c10::complex<double> *work, magma_int_t lwork,
    double *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_zgeev(jobvl, jobvr, n,
         reinterpret_cast<magmaDoubleComplex*>(A), lda,
         reinterpret_cast<magmaDoubleComplex*>(w),
         reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
         reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
         reinterpret_cast<magmaDoubleComplex*>(work), lwork,
         rwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaEig<c10::complex<float>, float>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    c10::complex<float> *A, magma_int_t lda,
    c10::complex<float> *w,
    c10::complex<float> *VL, magma_int_t ldvl,
    c10::complex<float> *VR, magma_int_t ldvr,
    c10::complex<float> *work, magma_int_t lwork,
    float *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_cgeev(jobvl, jobvr, n,
         reinterpret_cast<magmaFloatComplex*>(A), lda,
         reinterpret_cast<magmaFloatComplex*>(w),
         reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
         reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
         reinterpret_cast<magmaFloatComplex*>(work), lwork,
         rwork,
         info);
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaSvd<T>: singular value decomposition (MAGMA xGESDD, divide & conquer).
// Real specializations ignore 'rwork' (complex-only workspace in LAPACK).
template<> void magmaSvd<double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
    magma_int_t lda, double* s, double* U, magma_int_t ldu,
    double* VT, magma_int_t ldvt, double* work, magma_int_t lwork, double *rwork,
    magma_int_t* iwork, magma_int_t* info) {
  (void)rwork;  // unused
  MagmaStreamSyncGuard guard;
  magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaSvd<float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
    magma_int_t lda, float* s, float* U, magma_int_t ldu,
    float* VT, magma_int_t ldvt, float* work, magma_int_t lwork, float* rwork,
    magma_int_t* iwork, magma_int_t* info) {
  (void)rwork;  // unused
  MagmaStreamSyncGuard guard;
  magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaSvd<c10::complex<float>, float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
    magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
    c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
    float *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
                reinterpret_cast<magmaFloatComplex*>(U), ldu,
                reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
                reinterpret_cast<magmaFloatComplex*>(work), lwork,
                rwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaSvd<c10::complex<double>, double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
    magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
    c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
                reinterpret_cast<magmaDoubleComplex*>(U), ldu,
                reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
                reinterpret_cast<magmaDoubleComplex*>(work), lwork,
                rwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaLuSolve<T>: solve A*X = B from an existing LU factorization
// (MAGMA xGETRS, no transpose). Solution overwrites 'dB'.
template<> void magmaLuSolve<double>(
    magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaLuSolve<float>(
    magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaLuSolve<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrs_gpu(MagmaNoTrans, n, nrhs,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaLuSolve<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrs_gpu(MagmaNoTrans, n, nrhs,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

// magmaLuSolveBatched<T>: batched xGETRS; MAGMA returns one aggregate status,
// stored into the by-reference 'info'.
template<> void magmaLuSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaLuSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaLuSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize,
    magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<> void magmaLuSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize,
    magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
#endif

// Declares 'name' as a pointer into freshly pinned host memory of 'size'
// elements of 'type'. The backing storage object keeps the allocation alive
// for the remainder of the enclosing scope.
#define ALLOCATE_ARRAY(name, type, size) \
  auto storage_##name = pin_memory<type>(size); \
  name = static_cast<type*>(storage_##name.data());

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Solves A*X = b (overwriting b with the solution) via MAGMA LU-based solve.
// Dispatches to the single-matrix path for 2-D inputs and the batched path
// otherwise; per-matrix MAGMA status codes are written into 'infos'.
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA."); #else auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t lda = std::max(magma_int_t{1}, n); if (b.dim() == 2) { auto ipiv = at::empty({n}, at::kInt); infos = infos.to(at::kCPU); // magmaSolve requires infos tensor to live on CPU magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(), b_data, lda, infos.data_ptr<magma_int_t>()); } else { auto infos_data = infos.data_ptr<magma_int_t>(); auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx]; magma_int_t* info_array_cur = &infos_data[mini_idx]; magmaSolveBatched<scalar_t>( n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda, info_array_cur, batch_limit, magma_queue); } // Compute 
whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaSolveBatched<scalar_t>( n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda, &infos_data[mini_idx], batch_size % batch_limit, magma_queue); } } #endif } std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) { auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); auto infos = at::empty({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{ apply_solve<scalar_t>(self_working_copy, A_working_copy, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "solve_cuda"); } else { singleCheckErrors(infos.item().toInt(), "solve_cuda"); } return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // This is a type dispatching helper function for 'apply_solve' Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) { // 'result' and 'input' should be in column major order (it should be checked before calling this function) // the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve' // 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations) // 'input' should contain data of origianl 'input' tensor (left-hand-side of the linear system) AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cpu", [&]{ apply_solve<scalar_t>(result, input, infos); }); return result; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /* Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'. 'infos' is an int Tensor containing error codes for each matrix in the batched input. 
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
// Batched inverse: LU-factorize all matrices at once (magmaLuBatched), then
// invert them out-of-place (magmaGetriBatched) in chunks of at most 65535.
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
  "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data_ptr<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);

  auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
  auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();

  magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
  // MAGMA does not work with batch_size == 0, let's return early in this case
  if (batch_size == 0) {
    return;
  }

  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);

  // Host-pinned arrays of per-matrix device pointers required by the
  // MAGMA batched interfaces.
  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** self_array;
  scalar_t** self_inv_array;

  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
  ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
  ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);

  // Set up the created arrays
  for (int64_t i = 0; i < batch_size; i++) {
    self_array[i] = &self_data[i * self_mat_stride];
    self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }

  MAGMAQueue magma_queue(self.get_device());
  magmaLuBatched<scalar_t>(
    n, n, self_array, lda, ipiv_array, infos_lu_data,
    batch_size, magma_queue);

  constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 possible
  // The number of "mini"-batches are floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
  int64_t mini_batches = batch_size / batch_limit, mini_idx;
  for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
    scalar_t** self_array_cur = &self_array[mini_idx];
    scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
    magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
    magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];

    magmaGetriBatched<scalar_t>(
      n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
      lda, info_array_cur_getri, batch_limit, magma_queue);
  }

  // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
  // which concisely is equal to batch_size % batch_limit
  if (batch_size % batch_limit != 0) {
    magmaGetriBatched<scalar_t>(
      n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
      lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
  }
#endif
}

// Single-matrix in-place inverse: LU factorization (magmaLu) followed by
// GETRI with a workspace sized from the MAGMA-recommended blocksize.
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
  "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);
  magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);

  // magmaLu and magmaGetri requires infos tensor to live on CPU
  infos_lu = infos_lu.to(at::kCPU);
  infos_getri = infos_getri.to(at::kCPU);

  Tensor ipiv = at::empty({lda}, at::kInt);
  Tensor dwork = at::empty({lwork}, self.options());
  magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
  magmaGetri<scalar_t>(
    n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}

// MAGMA-backed inverse: batched path for stacked inputs, single path for 2-D.
// Error codes from LU and GETRI are checked separately.
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
  auto self_inv_working_copy = cloneBatchedColumnMajor(self);
  if (self.dim() > 2) {
    auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
    auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
    auto self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_batched_inverse<scalar_t>(
        self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
    });
    batchCheckErrors(infos_lu, "inverse_cuda");
    batchCheckErrors(infos_getri, "inverse_cuda");
  } else {
    // magmaLu and magmaGetri requires infos tensor to live on CPU
    auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
    });
    singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
    singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
  }
  return self_inv_working_copy;
}

Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) { return _inverse_helper_cuda_lib(self); // cusolver or cublas } else { return _inverse_helper_cuda_legacy(self); // magma-cuda } #else return _inverse_helper_cuda_legacy(self); // magma-cuda #endif } // This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors' Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) { // assuming result is in column major order and contains the matrices to invert if (result.dim() > 2) { auto input_working_copy = cloneBatchedColumnMajor(result); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_batched_inverse<scalar_t>( input_working_copy, result, infos_lu, infos_getri); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_single_inverse<scalar_t>(result, infos_lu, infos_getri); }); } return result; } // This is a MAGMA/cuSOLVER dispatching helper function Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) { // This function calculates the inverse matrix in-place // result should be in column major order and contain matrices to invert #ifdef USE_CUSOLVER if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) { return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas } else { return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda } #else return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda #endif return result; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("cholesky_solve: MAGMA library not 
found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t lda = std::max<magma_int_t>(1, n); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); int info_tmp = 0; if (b.dim() == 2) { magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda, b_data, lda, &info_tmp); info = info_tmp; } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor 
_cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  int64_t info = 0;
  // Both operands are copied into column-major layout as required by MAGMA.
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
  });
  TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
  return self_working_copy;
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// In-place Cholesky factorization of 'self' (single matrix or batch).
// Per-matrix MAGMA status codes are written to 'infos'.
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
  AT_ERROR("cholesky: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;

  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  auto lda = std::max<magma_int_t>(1, n);

  if (self.dim() == 2) {
    // Single-matrix path.
    magma_int_t info = 0;
    magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
    infos[0] = info;
  } else {
    // Batched path: per-matrix pointer array plus a device-side info array.
    auto self_mat_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    magma_int_t* info_array;
    scalar_t** self_array;

    ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_mat_stride];
    }

    MAGMAQueue magma_queue(self.get_device());

    int64_t batch_limit = self.is_complex() ?
self : self.transpose(-1, -2);
    result = at::empty(input.numel() + 1, input.options());
    result.resize_as_(input).copy_(input).transpose_(-1, -2);
  } else {
    result = cloneBatchedColumnMajor(upper ? self.transpose(-1, -2) : self);
  }

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
    self.scalar_type(), "cholesky_cuda", [&] {
      // 'false' = lower triangle; upper inputs were transposed above so a
      // single lower-triangular factorization covers both cases.
      apply_cholesky<scalar_t>(result, false, infos);
    });

  if (self.dim() > 2) {
    batchCheckErrors(infos, "cholesky_cuda");
  } else {
    singleCheckErrors(infos[0], "cholesky_cuda");
  }

  // Undo the transpose for upper-triangular requests.
  return upper ? result.transpose_(-1, -2) : result;
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/*
Computes the inverse of a symmetric (Hermitian) positive-definite matrix n-by-n matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
  TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
  // magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
  // it transfers data several times between GPU and CPU and calls lapack routine on CPU
  // using magmaCholeskySolveBatched is a lot faster
  // note that magmaCholeskySolve is also slow

  // 'input' is modified in-place we need to clone it and replace with a diagonal matrix
  // for apply_cholesky_solve
  auto input_working_copy = cloneBatchedColumnMajor(input);

  // 'input' tensor has to be a batch of diagonal matrix
  input.fill_(0);
  input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);

  Tensor result_u, input_u;
  if (input.dim() == 2) {
    // unsqueezing here so that the batched version is used
    result_u = input.unsqueeze(0);
    input_u = input_working_copy.unsqueeze(0);
  } else {
    result_u = input;
    input_u = input_working_copy;
  }

  // magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
  // it returns a single 'magma_int_t'
  // if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
  int64_t info_tmp = 0;
  apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
  infos.fill_(info_tmp);
#endif
}

// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
  // This function calculates the inverse matrix in-place
  // result should be in column major order and contain matrices to invert
  // the content of result is overwritten by 'apply_cholesky_inverse'
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
    apply_cholesky_inverse<scalar_t>(result, infos, upper);
  });
  return result;
}

REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// In-place LU factorization of 'self'; pivot indices go to 'pivots' (when
// 'get_pivots' is true) and per-matrix status codes to 'infos'.
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
  AT_ERROR("lu: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t m = magma_int_cast(self.size(-2), "m");
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  magma_int_t k = std::min(m, n);

  if (self.dim() == 2) {
    // If `pivots` is defined, then we have to compute them.
    // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
    // the partially-pivoted LU decomposition with / without pivots.
    // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
    // The data is later copied back to the appropriate output tensor.
    Tensor info_tmp = at::zeros({}, at::kInt);
    if (get_pivots) {
      Tensor piv_tmp = at::empty({k}, at::kInt);
      magmaLu<scalar_t>(
        m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
      pivots.copy_(piv_tmp);
    } else {
      magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
    }
    infos.copy_(info_tmp);
  } else {
    // Batched path: per-matrix pointer arrays for MAGMA's batched LU.
    auto self_matrix_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    scalar_t** self_array;
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_matrix_stride];
    }
    MAGMAQueue magma_queue(self.get_device());

    // Same comment as in the case of single matrix above.
    if (get_pivots) {
      auto pivots_data = pivots.data_ptr<magma_int_t>();
      auto pivots_matrix_stride = pivots.size(-1);
      magma_int_t** pivots_array;
      ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
      for (int64_t i = 0; i < batch_size; i++) {
        pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
      }
      magmaLuBatched<scalar_t>(
        m, n, self_array, m, pivots_array,
        infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
    } else {
      magmaLuNoPivBatched<scalar_t>(
        m, n, self_array, m, infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
    }
  }
#endif
}

// Computes the LU factorization of 'self' (with or without partial pivoting)
// and returns (factorization, pivots, infos). When 'check_errors' is set the
// info codes are validated here, allowing singular inputs.
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
  TORCH_CHECK(self.dim() >= 2,
           "expected tensor with 2 or more dimensions, got size: ", self.sizes(),
           " instead");
  auto m = self.size(-2);
  auto n = self.size(-1);
  auto k = std::min(m, n);
  auto req_size = self.sizes().vec();
  req_size.pop_back();
  req_size.back() = k;
  // Default pivots = identity permutation (1-based, as LAPACK/MAGMA expect).
  Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
  req_size.pop_back();
  auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));

  Tensor self_working_copy;
  if (self.numel() == 0) {
    self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  } else {
    self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
      apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
    });
  }
  if (check_errors) {
    if (self.dim() == 2) {
      singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
    } else {
      batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
    }
  }
  return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Solves the triangular system A X = B in-place in 'b'. 'upper', 'transpose'
// and 'unitriangular' map directly onto MAGMA's uplo/trans/diag flags.
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
  AT_ERROR("triangular_solve: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
  magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;

  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  // magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
  // magmaTriangularSolve is calling cuBLAS and it prints
  // ** On entry to DTRSM parameter number 9 had an illegal value
  // so let's use proper lda parameter here
  magma_int_t lda = std::max<magma_int_t>(1, n);
  magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

  MAGMAQueue magma_queue(b.get_device());

  // batch_size == 1 implies that:
  // 1. the RHS and LHS tensors have 2 dimensions, or
  // 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
  if (batch_size == 1) {
    // TODO: this magma call is just a wrapper around cublas<t>trsm, consider using cublas directly here
    magmaTriangularSolve<scalar_t>(
      uplo, trans, diag, n, nrhs, A_data, lda, b_data, lda, magma_queue);
  } else {
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);

    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }

    // NOTE(review): this shadows the magma_queue declared above — legal, but
    // the outer queue is then unused on this path.
    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];

      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, A_array_cur,
          lda, b_array_cur, lda, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, &A_array[mini_idx],
          lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
    }
  }
#endif
}

std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
                                                         bool upper, bool transpose, bool unitriangular) {
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(),
  "triangular_solve_cuda", [&]{
    apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
  });
  return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Computes the QR decomposition: R is formed in 'R' via geqrf2, and (when
// 'compute_q' is set) Q is formed in 'Q' via geqrf + orgqr. Per-matrix MAGMA
// status codes are written to 'infos'; the first failure aborts the loop.
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1,
                     int64_t n_columns, bool compute_q, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
  AT_ERROR("qr: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
  magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");

  auto r_data = R.data_ptr<scalar_t>();
  auto r_matrix_stride = matrixStride(R);
  magma_int_t k = m < n ? m : n;
  magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
  int64_t batch_size = batchCount(R);

  // magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
  // The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
  Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
  Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
  scalar_t* tau_data = tau.data_ptr<scalar_t>();
  scalar_t* work_data = work.data_ptr<scalar_t>();

  // This phase computes R (the raw version)
  // This uses MAGMA's ?geqrf2_gpu function
  magma_int_t info = 0;
  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
    magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
  if (!compute_q) {
    // this is for mode='r'
    return;
  }

  // This phase computes Q (the raw version)
  // We require to perform ?geqrf_gpu again due to this bug in MAGMA:
  // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
  // - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
  // Refer to the below link for more details:
  // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
  auto q_data = Q.data_ptr<scalar_t>();
  auto q_matrix_stride = matrixStride(Q);
  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
    magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
    infos[i] = info;
    if (info != 0) {
      return;
    }
    magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
#endif
}

// Prepares Q/R working tensors according to 'mode' ("reduced"/"complete"/"r"),
// runs apply_qr, checks the infos, and trims the outputs to the requested
// geometry. Empty inputs short-circuit to identity/empty results.
std::tuple<Tensor,Tensor> _linalg_qr_helper_cuda(const Tensor& self, std::string mode) {
  bool compute_q, reduced;
  std::tie(compute_q, reduced) = _parse_qr_mode(mode);
  std::vector<int64_t> infos(batchCount(self), 0);

  // Setup input geometry and inputs for apply_qr
  std::vector<int64_t> q_sizes, q_strides;
  int64_t n_columns_q;
  std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
  Tensor q_working_copy, r_working_copy;

  // If there are no elements, then we simply return a pair of tensors of required dimensions
  if (self.numel() == 0) {
    int64_t n = self.size(-1);
    r_working_copy = at::empty({n_columns_q, n}, self.options());
    if (compute_q) {
      int64_t n_rows_q = q_sizes[self.dim() - 2];
      q_working_copy = at::eye(n_rows_q, n_columns_q, self.options());
    } else {
      q_working_copy = at::empty({0}, self.options());
    }
    return std::make_tuple(q_working_copy, r_working_copy);
  }

  if (compute_q) {
    q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
    q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
  } else {
    q_working_copy = at::empty({0}, self.options());
  }
  r_working_copy = cloneBatchedColumnMajor(self);

  int64_t m = q_sizes[self.dim() - 2];
  int64_t n = r_working_copy.size(-1);

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
    apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "qr_cuda");
  } else {
    singleCheckErrors(infos[0], "qr_cuda");
  }

  if (compute_q) {
    q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
  }
  r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
  return std::make_tuple(q_working_copy, r_working_copy);
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Symmetric/Hermitian eigendecomposition of 'self' in-place (eigenvectors
// overwrite the input when requested); eigenvalues go to 'eigvals'.
// Per-matrix MAGMA status codes are written to 'infos'.
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper,
                         std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
  AT_ERROR("symeig: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto eigvals_data = eigvals.data_ptr<value_t>();
  auto self_matrix_stride = matrixStride(self);
  auto eigvals_stride = eigvals.size(-1);
  int64_t batch_size = batchCount(self);
  magma_int_t n = magma_int_cast(self.size(-1), "n");

  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;

  scalar_t* wA;
  ALLOCATE_ARRAY(wA, scalar_t, n * n);

  magma_int_t info;
  // Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magma_int_t liwork = -1; magma_int_t iwkopt; magma_int_t lrwork = -1; value_t rwkopt; magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info); scalar_t* work; magma_int_t* iwork; lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size"); liwork = magma_int_cast(iwkopt, "iwork_size"); ALLOCATE_ARRAY(work, scalar_t, lwork); ALLOCATE_ARRAY(iwork, magma_int_t, liwork); value_t* rwork = nullptr; c10::Storage storage_rwork; if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { lrwork = magma_int_cast(rwkopt, "rwork_size"); storage_rwork = pin_memory<value_t>(lrwork); rwork = static_cast<value_t*>(storage_rwork.data()); } for (int64_t i = 0; i < batch_size; i++) { scalar_t* self_working_ptr = &self_data[i * self_matrix_stride]; value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride]; magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr, wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); auto self_sizes = self.sizes().vec(); self_sizes.pop_back(); ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype())); // magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors. // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues. // The data is later moved to the appropriate device. 
// In the case where self.numel() == 0, we just return an empty tensor of // dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)") auto eigvals_working_copy = self.numel() == 0 ? at::empty(self_sizes, self.options().dtype(dtype)) : at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU)); if (self.numel() == 0) { return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT)); } auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{ apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "symeig_cuda"); } else { singleCheckErrors(infos[0], "symeig_cuda"); } if (eigenvectors) { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy); } else { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options())); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU // memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy // the returned values from CPU to GPU. See also magmaSymeig, which uses a // similar approach. template <typename scalar_t> static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs, int64_t *info_ptr) { #ifndef USE_MAGMA TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. " "Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA."); #else TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor"); using value_t = typename c10::scalar_value_type<scalar_t>::type; magma_vec_t jobvr = eigenvectors ? 
MagmaVec : MagmaNoVec;
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  auto self_data = self.data_ptr<scalar_t>();

  auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
  scalar_t *wr = out_eigvals_data;

  scalar_t *vr_data = NULL;
  magma_int_t ldvr = 1;
  if (jobvr == MagmaVec)
  {
      vr_data = out_eigvecs.data_ptr<scalar_t>();
      ldvr = n;
  }

  // rwork is only needed for complex inputs.
  value_t *rwork_data = nullptr;
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    ALLOCATE_ARRAY(rwork_data, value_t, n*2);
  }

  if (n > 0) {
    // call magmaEig once to get the optimal size of work_data
    scalar_t wkopt;
    magma_int_t info;
    magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr,
      NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
    magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));

    // call it a 2nd time to do the actual work
    scalar_t *work_data = nullptr;
    ALLOCATE_ARRAY(work_data, scalar_t, lwork);
    magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr,
      NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
    *info_ptr = info;
  }
#endif
}

/*
 * Internal helper; like eig_cuda but:
 *   1. assume that self is a square matrix of side "n"
 *   2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
 *      by the caller
 */
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
  int64_t n = self.size(-1);
  // copy self to pinned CPU memory
  auto self_working_copy = at::empty_strided(
      {n, n}, // square matrix
      {1, n}, // column-ordered, as magmaEig expects
      at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
  self_working_copy.copy_(self);

  // tensors holding the results. We use empty_strided to make them column-ordered
  auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor out_eigvals;
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
      out_eigvals = at::empty({n}, options);
  } else {
      // real input: eigenvalues come back as (real, imag) pairs in an {n, 2} tensor
      out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
  }
  auto out_eigvecs = eigenvectors
                     ? at::empty_strided({n, n}, {1, n}, options)
                     : Tensor();

  int64_t info;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
    apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
  });
  singleCheckErrors(info, "eig_cuda");

  return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}

REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ syevd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// This function computes eigenvalues 'w' and eigenvectors 'v' of the tensor 'self'
// compute_eigenvectors controls whether eigenvectors should be computed
// uplo controls the portion of input matrix to consider in computations, allowed values are "u", "U", "l", "L"
// '_symeig_helper_cuda' prepares correct input for 'apply_symeig' and checks for possible errors using 'infos'
// See also CPU implementation in aten/src/ATen/native/BatchLinearAlgebra.cpp
std::tuple<Tensor, Tensor> _syevd_helper_cuda(const Tensor& self, bool compute_eigenvectors, std::string uplo_str) {
  // NumPy allows lowercase input for UPLO argument
  // It is assumed that uplo_str is either "U" or "L"
  char uplo = std::toupper(uplo_str[0]);
  bool upper = uplo == 'U' ? true : false;
  return _symeig_helper_cuda(self, compute_eigenvectors, upper);
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// SVD of each matrix in 'self' via MAGMA's gesdd-style driver. 'jobchar'
// selects which singular vectors to compute: 'A' (all), 'S' (thin), 'N' (none).
// Per-matrix MAGMA status codes are written to 'infos'.
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
                      char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto U_data = U.data_ptr<scalar_t>();
  auto S_data = S.data_ptr<value_t>();
  auto VT_data = VT.data_ptr<scalar_t>();
  auto self_stride = matrixStride(self);
  auto U_stride = matrixStride(U);
  auto S_stride = S.size(-1);
  auto VT_stride = matrixStride(VT);
  auto batchsize = batchCount(self);

  magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);

  magma_int_t m = magma_int_cast(self.size(-2), "m");
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  auto mn = std::min(m, n);

  c10::Storage storage_rwork;
  value_t* rwork = nullptr;

  magma_int_t* iwork;
  ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
  // rwork is only needed for complex inputs.
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    auto lrwork = computeLRWorkDim(jobchar, m, n);
    storage_rwork = pin_memory<value_t>(lrwork);
    rwork = static_cast<value_t*>(storage_rwork.data());
  }

  magma_int_t info = 0;
  // Run once, first to get the optimum work size.
  // Since we deal with batches of matrices with the same dimensions, doing this outside
  // the loop saves (batch_size - 1) workspace queries which would provide the same result
  // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
  magma_int_t lwork = -1;   // -1 = LAPACK-style workspace-size query
  scalar_t wkopt;
  magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
  lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
  scalar_t* work;
  ALLOCATE_ARRAY(work, scalar_t, lwork);

  for (int64_t i = 0; i < batchsize; i++) {
    scalar_t* self_working_ptr = &self_data[i * self_stride];
    value_t* S_working_ptr = &S_data[i * S_stride];
    scalar_t* U_working_ptr = &U_data[i * U_stride];
    scalar_t* VT_working_ptr = &VT_data[i * VT_stride];

    // Compute S, U (optionally), VT (optionally)
    magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
                                S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
#endif
}

// Returns (U, S, V) for 'self'. MAGMA computes VT; the conjugate-transpose at
// the end converts it to V as torch.svd expects.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
  std::vector<int64_t> infos(batchCount(self), 0);
  int64_t m = self.size(-2), n = self.size(-1);
  int64_t k = std::min(m, n);

  char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';

  Tensor U_working_copy, S_working_copy, VT_working_copy;
  std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);

  if (self.numel() > 0) {
    // The input matrix, U, S and VT have to reside in pinned memory.
    // Additionally, the input and U have to be in column major format.
    // _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
    // For the input matrix, these requirements are being taken care of below.
    // Specify strides
    auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
    self_col_major_strides[self.dim() - 2] = 1;
    self_col_major_strides[self.dim() - 1] = m;
    // Create strided tensor in pinned memory
    auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
                                               at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
    self_working_copy.copy_(self);

    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
      apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
    });

    if (self.dim() > 2) {
      batchCheckErrors(infos, "svd_cuda");
    } else {
      singleCheckErrors(infos[0], "svd_cuda");
    }

    // Move results back onto the input's device, keeping their strides.
    U_working_copy = same_stride_to(U_working_copy, self.options());
    S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
    VT_working_copy = same_stride_to(VT_working_copy, self.options());

    if (compute_uv) {
      if (some) {
        VT_working_copy = VT_working_copy.narrow(-2, 0, k);
      }
    } else {
      VT_working_copy.zero_();
      U_working_copy.zero_();
    }
  } else {
    // Empty input: return correctly-shaped zero tensors on the input device.
    U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
    S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
    VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
  }
  // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
  // Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
  VT_working_copy = VT_working_copy.conj();
  VT_working_copy.transpose_(-2, -1);
  return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}

// Backend dispatcher: cuSOLVER when compiled in, MAGMA otherwise.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
  return _svd_helper_cuda_lib(self, some, compute_uv);
#else
  return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Solves LU X = B given an LU factorization ('lu', 'pivots'); 'b' holds B and
// is overwritten with the solution. 'info' receives the MAGMA status code.
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
  AT_ERROR("lu_solve: MAGMA library not found in "
           "compilation. Please rebuild with MAGMA.");
#else
  auto b_data = b.data_ptr<scalar_t>();
  auto lu_data = lu.data_ptr<scalar_t>();
  auto n = lu.size(-2);
  auto nrhs = b.size(-1);

  int info_tmp = 0;
  if (b.dim() == 2) {
    // Single-matrix path; magmaLuSolve needs the pivots on the CPU.
    Tensor pivots_tmp = pivots.cpu();
    magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
    info = info_tmp;
  } else {
    // Batched path: build per-matrix pointer arrays for MAGMA's batched API.
    auto pivots_data = pivots.data_ptr<magma_int_t>();
    auto b_stride = matrixStride(b);
    auto lu_stride = matrixStride(lu);
    auto pivots_stride = pivots.size(-1);
    magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");

    magma_int_t** pivots_array;
    scalar_t** lu_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    for (int64_t i = 0; i < batch_size; i++) {
      pivots_array[i] = &pivots_data[i * pivots_stride];
      b_array[i] = &b_data[i * b_stride];
      lu_array[i] = &lu_data[i * lu_stride];
    }

    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** lu_array_cur = &lu_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** pivots_array_cur = &pivots_array[mini_idx]; magmaLuSolveBatched<scalar_t>( n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaLuSolveBatched<scalar_t>( n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data); auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous(); if (self.numel() == 0 || LU_data.numel() == 0) { return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{ apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info); }); TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info); return self_working_copy; } }} // namespace at::native #undef ALLOCATE_ARRAY
45f55acf1f971cbddbc0c965bced95f540b2f6c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <clover_field_order.h> #include <tune_quda.h> namespace quda { using namespace clover; #ifdef GPU_CLOVER_DIRAC /** Kernel argument struct */ template <typename Out, typename In> struct CopyCloverArg { Out out; const In in; int volumeCB; CopyCloverArg (const Out &out, const In in, int volume) : out(out), in(in), volumeCB(in.volumeCB) { } }; /** Generic CPU clover reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> void copyClover(CopyCloverArg<Out,In> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { for (int x=0; x<arg.volumeCB; x++) { RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, parity); } } } /** Generic CUDA clover reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> __global__ void copyCloverKernel(CopyCloverArg<Out,In> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= arg.volumeCB) return; RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, parity); } } template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> class CopyClover : Tunable { CopyCloverArg<Out,In> arg; const CloverField &meta; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.volumeCB; } public: CopyClover(CopyCloverArg<Out,In> &arg, const CloverField &meta) : arg(arg), meta(meta) { writeAuxString("out_stride=%d,in_stride=%d", arg.out.stride, arg.in.stride); } virtual ~CopyClover() { ; } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); hipLaunchKernelGGL(( copyCloverKernel<FloatOut, FloatIn, length, Out, In>) , dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg); } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } long long bytes() const { return 2*arg.volumeCB*(arg.in.Bytes() + arg.out.Bytes()); } }; template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyClover(OutOrder outOrder, const InOrder inOrder, const CloverField &out, QudaFieldLocation location) { CopyCloverArg<OutOrder,InOrder> arg(outOrder, inOrder, out.Volume()); if (location == QUDA_CPU_FIELD_LOCATION) { copyClover<FloatOut, FloatIn, length, OutOrder, InOrder>(arg); } else if (location == QUDA_CUDA_FIELD_LOCATION) { CopyClover<FloatOut, FloatIn, length, OutOrder, InOrder> cloverCopier(arg, out); cloverCopier.apply(0); } else { errorQuda("Undefined field location %d for copyClover", location); } } template <typename FloatOut, typename FloatIn, int length, typename InOrder> void copyClover(const InOrder &inOrder, CloverField &out, bool inverse, QudaFieldLocation location, FloatOut *Out, float *outNorm) { if (out.isNative()) { typedef typename clover_mapper<FloatOut>::type C; copyClover<FloatOut,FloatIn,length>(C(out, inverse, Out, outNorm), inOrder, out, location); } else if (out.Order() == QUDA_PACKED_CLOVER_ORDER) { copyClover<FloatOut,FloatIn,length> (QDPOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location); } else if (out.Order() == QUDA_QDPJIT_CLOVER_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyClover<FloatOut,FloatIn,length> 
(QDPJITOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (out.Order() == QUDA_BQCD_CLOVER_ORDER) { errorQuda("BQCD output not supported"); } else { errorQuda("Clover field %d order not supported", out.Order()); } } template <typename FloatOut, typename FloatIn, int length> void copyClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location, FloatOut *Out, FloatIn *In, float *outNorm, float *inNorm) { // reconstruction only supported on FloatN fields currently if (in.isNative()) { typedef typename clover_mapper<FloatIn>::type C; copyClover<FloatOut,FloatIn,length>(C(in, inverse, In, inNorm), out, inverse, location, Out, outNorm); } else if (in.Order() == QUDA_PACKED_CLOVER_ORDER) { copyClover<FloatOut,FloatIn,length> (QDPOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); } else if (in.Order() == QUDA_QDPJIT_CLOVER_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyClover<FloatOut,FloatIn,length> (QDPJITOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (in.Order() == QUDA_BQCD_CLOVER_ORDER) { #ifdef BUILD_BQCD_INTERFACE copyClover<FloatOut,FloatIn,length> (BQCDOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); #else errorQuda("BQCD interface has not been built\n"); #endif } else { errorQuda("Clover field %d order not supported", in.Order()); } } #endif // this is the function that is actually called, from here on down we instantiate all required templates void copyGenericClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location, void *Out, void *In, void *outNorm, void *inNorm) { #ifdef GPU_CLOVER_DIRAC if (out.Precision() == QUDA_HALF_PRECISION && out.Order() > 4) errorQuda("Half precision not supported for order %d", out.Order()); if (in.Precision() == 
QUDA_HALF_PRECISION && in.Order() > 4) errorQuda("Half precision not supported for order %d", in.Order()); if (out.Precision() == QUDA_DOUBLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyClover<double,double,72>(out, in, inverse, location, (double*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<double,float,72>(out, in, inverse, location, (double*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<double,short,72>(out, in, inverse, location, (double*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } else if (out.Precision() == QUDA_SINGLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyClover<float,double,72>(out, in, inverse, location, (float*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<float,float,72>(out, in, inverse, location, (float*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<float,short,72>(out, in, inverse, location, (float*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } else if (out.Precision() == QUDA_HALF_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION){ copyClover<short,double,72>(out, in, inverse, location, (short*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<short,float,72>(out, in, inverse, location, (short*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<short,short,72>(out, in, inverse, location, (short*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } #else errorQuda("Clover has not been built"); #endif } } // namespace quda
45f55acf1f971cbddbc0c965bced95f540b2f6c3.cu
#include <clover_field_order.h> #include <tune_quda.h> namespace quda { using namespace clover; #ifdef GPU_CLOVER_DIRAC /** Kernel argument struct */ template <typename Out, typename In> struct CopyCloverArg { Out out; const In in; int volumeCB; CopyCloverArg (const Out &out, const In in, int volume) : out(out), in(in), volumeCB(in.volumeCB) { } }; /** Generic CPU clover reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> void copyClover(CopyCloverArg<Out,In> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { for (int x=0; x<arg.volumeCB; x++) { RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, parity); } } } /** Generic CUDA clover reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> __global__ void copyCloverKernel(CopyCloverArg<Out,In> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= arg.volumeCB) return; RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, parity); } } template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> class CopyClover : Tunable { CopyCloverArg<Out,In> arg; const CloverField &meta; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.volumeCB; } public: CopyClover(CopyCloverArg<Out,In> &arg, const CloverField &meta) : arg(arg), meta(meta) { writeAuxString("out_stride=%d,in_stride=%d", arg.out.stride, arg.in.stride); } virtual ~CopyClover() { ; } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); copyCloverKernel<FloatOut, FloatIn, length, Out, In> <<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg); } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } long long bytes() const { return 2*arg.volumeCB*(arg.in.Bytes() + arg.out.Bytes()); } }; template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyClover(OutOrder outOrder, const InOrder inOrder, const CloverField &out, QudaFieldLocation location) { CopyCloverArg<OutOrder,InOrder> arg(outOrder, inOrder, out.Volume()); if (location == QUDA_CPU_FIELD_LOCATION) { copyClover<FloatOut, FloatIn, length, OutOrder, InOrder>(arg); } else if (location == QUDA_CUDA_FIELD_LOCATION) { CopyClover<FloatOut, FloatIn, length, OutOrder, InOrder> cloverCopier(arg, out); cloverCopier.apply(0); } else { errorQuda("Undefined field location %d for copyClover", location); } } template <typename FloatOut, typename FloatIn, int length, typename InOrder> void copyClover(const InOrder &inOrder, CloverField &out, bool inverse, QudaFieldLocation location, FloatOut *Out, float *outNorm) { if (out.isNative()) { typedef typename clover_mapper<FloatOut>::type C; copyClover<FloatOut,FloatIn,length>(C(out, inverse, Out, outNorm), inOrder, out, location); } else if (out.Order() == QUDA_PACKED_CLOVER_ORDER) { copyClover<FloatOut,FloatIn,length> (QDPOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location); } else if (out.Order() == QUDA_QDPJIT_CLOVER_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyClover<FloatOut,FloatIn,length> 
(QDPJITOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (out.Order() == QUDA_BQCD_CLOVER_ORDER) { errorQuda("BQCD output not supported"); } else { errorQuda("Clover field %d order not supported", out.Order()); } } template <typename FloatOut, typename FloatIn, int length> void copyClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location, FloatOut *Out, FloatIn *In, float *outNorm, float *inNorm) { // reconstruction only supported on FloatN fields currently if (in.isNative()) { typedef typename clover_mapper<FloatIn>::type C; copyClover<FloatOut,FloatIn,length>(C(in, inverse, In, inNorm), out, inverse, location, Out, outNorm); } else if (in.Order() == QUDA_PACKED_CLOVER_ORDER) { copyClover<FloatOut,FloatIn,length> (QDPOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); } else if (in.Order() == QUDA_QDPJIT_CLOVER_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyClover<FloatOut,FloatIn,length> (QDPJITOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (in.Order() == QUDA_BQCD_CLOVER_ORDER) { #ifdef BUILD_BQCD_INTERFACE copyClover<FloatOut,FloatIn,length> (BQCDOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); #else errorQuda("BQCD interface has not been built\n"); #endif } else { errorQuda("Clover field %d order not supported", in.Order()); } } #endif // this is the function that is actually called, from here on down we instantiate all required templates void copyGenericClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location, void *Out, void *In, void *outNorm, void *inNorm) { #ifdef GPU_CLOVER_DIRAC if (out.Precision() == QUDA_HALF_PRECISION && out.Order() > 4) errorQuda("Half precision not supported for order %d", out.Order()); if (in.Precision() == 
QUDA_HALF_PRECISION && in.Order() > 4) errorQuda("Half precision not supported for order %d", in.Order()); if (out.Precision() == QUDA_DOUBLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyClover<double,double,72>(out, in, inverse, location, (double*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<double,float,72>(out, in, inverse, location, (double*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<double,short,72>(out, in, inverse, location, (double*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } else if (out.Precision() == QUDA_SINGLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyClover<float,double,72>(out, in, inverse, location, (float*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<float,float,72>(out, in, inverse, location, (float*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<float,short,72>(out, in, inverse, location, (float*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } else if (out.Precision() == QUDA_HALF_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION){ copyClover<short,double,72>(out, in, inverse, location, (short*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<short,float,72>(out, in, inverse, location, (short*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<short,short,72>(out, in, inverse, location, (short*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } #else errorQuda("Clover has not been built"); #endif } } // namespace quda
b35799d417bcba05e6128d0aeba229871724dfee.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 4096 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, 
end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); tmp_ptr = (void **)(&(ptr_array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); 
tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr 
= (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { hipProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; hipError_t error_id; /* 
allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 1 is %s\n", hipGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice); hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice); hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 2 is %s\n", hipGetErrorString(error_id)); } hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); hipDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1); //shared_latency <<<Dg, Db, 
sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); hipDeviceSynchronize(); ///hipDeviceSynchronize (); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 3 is %s\n", hipGetErrorString(error_id)); } /* copy results from GPU to CPU */ hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost); hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost); hipDeviceSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ hipFree(d_a); hipFree(d_ptr_a); hipFree(duration); hipDeviceSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
b35799d417bcba05e6128d0aeba229871724dfee.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 4096 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = 
blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); tmp_ptr = (void **)(&(ptr_array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); 
tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr 
= (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { cudaProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; cudaError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long 
*)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 1 is %s\n", cudaGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 2 is %s\n", cudaGetErrorString(error_id)); } init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, 
num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); ///cudaThreadSynchronize (); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 3 is %s\n", cudaGetErrorString(error_id)); } /* copy results from GPU to CPU */ cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost); cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); cudaThreadSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ cudaFree(d_a); cudaFree(d_ptr_a); cudaFree(duration); cudaThreadSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
e143eb0f531525a6c2070f1034a6d488049a6319.hip
// !!! This is a file automatically generated by hipify!!! //fail:assertion //--blockDim=1024 --gridDim=1 --no-inline #include <stdio.h> #include <hip/hip_runtime.h> #include <assert.h> #define N 2//1024 typedef float(*funcType)(float*, unsigned int); __device__ float multiplyByTwo(float *v, unsigned int tid) { return v[tid] * 2.0f; } __device__ float divideByTwo(float *v, unsigned int tid) { return v[tid] * 0.5f; } __device__ funcType grabFunction(int i) { __requires(i != 0); //__ensures(__return_val_funptr(funcType) == divideByTwo); if (i == 0) return multiplyByTwo; else return divideByTwo; } __global__ void foo(float *v, unsigned int size, int i) { __requires(i != 0); unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; funcType f = grabFunction(i); if (tid < size) { float x = (*f)(v, tid); x += multiplyByTwo(v, tid); v[threadIdx.x] = x; } }
e143eb0f531525a6c2070f1034a6d488049a6319.cu
//fail:assertion //--blockDim=1024 --gridDim=1 --no-inline #include <stdio.h> #include <cuda.h> #include <assert.h> #define N 2//1024 typedef float(*funcType)(float*, unsigned int); __device__ float multiplyByTwo(float *v, unsigned int tid) { return v[tid] * 2.0f; } __device__ float divideByTwo(float *v, unsigned int tid) { return v[tid] * 0.5f; } __device__ funcType grabFunction(int i) { __requires(i != 0); //__ensures(__return_val_funptr(funcType) == divideByTwo); if (i == 0) return multiplyByTwo; else return divideByTwo; } __global__ void foo(float *v, unsigned int size, int i) { __requires(i != 0); unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; funcType f = grabFunction(i); if (tid < size) { float x = (*f)(v, tid); x += multiplyByTwo(v, tid); v[threadIdx.x] = x; } }
fc92af04313be38be186579b683b0dfce7353251.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" cudaStream_t stream[nStreams]; for (int i = 0; i < nStreams; ++i) hipStreamCreate(&stream[i]); for (int i = 0; i < nStreams; ++i) { hipMemcpyAsync(d_A[i], h_A[i], mem_size_A, hipMemcpyHostToDevice, stream[i]); hipMemcpyAsync(d_B[i], h_B[i], mem_size_B, hipMemcpyHostToDevice, stream[i]); } for (int i = 0; i < nStreams; ++i) hipLaunchKernelGGL(( matrixMulCUDA<block_size>), dim3(grid), dim3(threads), 0, stream[i], d_C[i], d_A[i], d_B[i], dimsA.x, dimsB.x); for (int i = 0; i < nStreams; ++i) hipMemcpyAsync(h_C[i], d_C[i], mem_size_C, hipMemcpyDeviceToHost, stream[i]);
fc92af04313be38be186579b683b0dfce7353251.cu
cudaStream_t stream[nStreams]; for (int i = 0; i < nStreams; ++i) cudaStreamCreate(&stream[i]); for (int i = 0; i < nStreams; ++i) { cudaMemcpyAsync(d_A[i], h_A[i], mem_size_A, cudaMemcpyHostToDevice, stream[i]); cudaMemcpyAsync(d_B[i], h_B[i], mem_size_B, cudaMemcpyHostToDevice, stream[i]); } for (int i = 0; i < nStreams; ++i) matrixMulCUDA<block_size><<<grid, threads, 0, stream[i]>>>(d_C[i], d_A[i], d_B[i], dimsA.x, dimsB.x); for (int i = 0; i < nStreams; ++i) cudaMemcpyAsync(h_C[i], d_C[i], mem_size_C, cudaMemcpyDeviceToHost, stream[i]);
1d7c714824a94ab470ea77948ff4a08f2252ec25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string> #define CHKMAL_ERROR if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } #define CHKMEMCPY_ERROR if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } #define CHKSYNC_ERROR if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize failed! Error code %d\n", cudaStatus); goto Error; } // arrSize indices; THREADS_PER_BLOCK * NO_BLOCKS total threads; // Each thread in charge of THREAD_BLOCK_SIZE contigeous indices #define NO_BLOCKS 5 #define THREADS_PER_BLOCK 1000 // Histogram code __global__ void threadedHistKernel(int *threadedHist, int *arr, const int blockSize, const int valRange, const int threadBlockSize) { int val, bid = blockIdx.x, tid = threadIdx.x, pid = bid*blockSize + tid; //positional ID // each thread takes info from its given info and increases the relevant position on the threadedHist for (int i = 0; i < threadBlockSize; i++) { val = arr[pid*threadBlockSize + i]; threadedHist[valRange*pid + val]++; } } __global__ void sumThreadedResultsKernel(long *dev_hist, int *dev_threadedHist, const int valRange, const int Blocks) { //e.g. 
tid from 0 to valRange-1, blocks = THREADS_PER_BLOCK * NO_BLOCKS int tid = threadIdx.x; for (int bl = 0; bl < Blocks; bl++) { dev_hist[tid] += dev_threadedHist[bl*valRange + tid]; } } hipError_t histogramWithCuda(long* hist, const int* largeArr, const int arrSize, const int histSize) { if (arrSize % (THREADS_PER_BLOCK * NO_BLOCKS) != 0) { fprintf(stderr, "histogramWithCuda launch failed:\n" "Array size (%d) modulo Total threads (%d) != 0.\n" "Try changing number of threads.\n", arrSize, (THREADS_PER_BLOCK * NO_BLOCKS)); goto Error; } const int THREAD_BLOCK_SIZE = arrSize / (THREADS_PER_BLOCK * NO_BLOCKS); int *dev_arr = 0; long *dev_hist = 0; int *dev_threadedHist = 0; hipError_t cudaStatus; // memory init block { cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers cudaStatus = hipMalloc((void**)&dev_arr, arrSize * sizeof(int)); CHKMAL_ERROR; cudaStatus = hipMalloc((void**)&dev_hist, histSize * sizeof(long)); CHKMAL_ERROR; cudaStatus = hipMalloc((void**)&dev_threadedHist, THREADS_PER_BLOCK * NO_BLOCKS * histSize * sizeof(int)); CHKMAL_ERROR; // each thread gets a "private" histogram // Copy input / memSet (Host to Device) cudaStatus = hipMemcpy(dev_arr, largeArr, arrSize * sizeof(int), hipMemcpyHostToDevice); CHKMEMCPY_ERROR; cudaStatus = hipMemcpy(dev_hist, hist, histSize * sizeof(int), hipMemcpyHostToDevice); CHKMEMCPY_ERROR; cudaStatus = hipMemset((void*)dev_threadedHist, 0, THREADS_PER_BLOCK * NO_BLOCKS * histSize * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!\n"); goto Error; } } // *** phase 1 *** // Launch a kernel on the GPU with one thread for every THREAD_BLOCK_SIZE elements. 
threadedHistKernel << <NO_BLOCKS, THREADS_PER_BLOCK >> >(dev_threadedHist, dev_arr, THREADS_PER_BLOCK, histSize, THREAD_BLOCK_SIZE); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "threadedHistKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipDeviceSynchronize(); CHKSYNC_ERROR; // *** phase 2 *** sumThreadedResultsKernel << <1, histSize >> >(dev_hist, dev_threadedHist, histSize, THREADS_PER_BLOCK * NO_BLOCKS); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "sumThreadedResultsKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipDeviceSynchronize(); CHKSYNC_ERROR; // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(hist, dev_hist, histSize * sizeof(int), hipMemcpyDeviceToHost); CHKMEMCPY_ERROR; Error: hipFree(dev_arr); hipFree(dev_hist); hipFree(dev_threadedHist); return cudaStatus; }
1d7c714824a94ab470ea77948ff4a08f2252ec25.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string> #define CHKMAL_ERROR if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } #define CHKMEMCPY_ERROR if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } #define CHKSYNC_ERROR if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize failed! Error code %d\n", cudaStatus); goto Error; } // arrSize indices; THREADS_PER_BLOCK * NO_BLOCKS total threads; // Each thread in charge of THREAD_BLOCK_SIZE contigeous indices #define NO_BLOCKS 5 #define THREADS_PER_BLOCK 1000 // Histogram code __global__ void threadedHistKernel(int *threadedHist, int *arr, const int blockSize, const int valRange, const int threadBlockSize) { int val, bid = blockIdx.x, tid = threadIdx.x, pid = bid*blockSize + tid; //positional ID // each thread takes info from its given info and increases the relevant position on the threadedHist for (int i = 0; i < threadBlockSize; i++) { val = arr[pid*threadBlockSize + i]; threadedHist[valRange*pid + val]++; } } __global__ void sumThreadedResultsKernel(long *dev_hist, int *dev_threadedHist, const int valRange, const int Blocks) { //e.g. 
tid from 0 to valRange-1, blocks = THREADS_PER_BLOCK * NO_BLOCKS int tid = threadIdx.x; for (int bl = 0; bl < Blocks; bl++) { dev_hist[tid] += dev_threadedHist[bl*valRange + tid]; } } cudaError_t histogramWithCuda(long* hist, const int* largeArr, const int arrSize, const int histSize) { if (arrSize % (THREADS_PER_BLOCK * NO_BLOCKS) != 0) { fprintf(stderr, "histogramWithCuda launch failed:\n" "Array size (%d) modulo Total threads (%d) != 0.\n" "Try changing number of threads.\n", arrSize, (THREADS_PER_BLOCK * NO_BLOCKS)); goto Error; } const int THREAD_BLOCK_SIZE = arrSize / (THREADS_PER_BLOCK * NO_BLOCKS); int *dev_arr = 0; long *dev_hist = 0; int *dev_threadedHist = 0; cudaError_t cudaStatus; // memory init block { cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers cudaStatus = cudaMalloc((void**)&dev_arr, arrSize * sizeof(int)); CHKMAL_ERROR; cudaStatus = cudaMalloc((void**)&dev_hist, histSize * sizeof(long)); CHKMAL_ERROR; cudaStatus = cudaMalloc((void**)&dev_threadedHist, THREADS_PER_BLOCK * NO_BLOCKS * histSize * sizeof(int)); CHKMAL_ERROR; // each thread gets a "private" histogram // Copy input / memSet (Host to Device) cudaStatus = cudaMemcpy(dev_arr, largeArr, arrSize * sizeof(int), cudaMemcpyHostToDevice); CHKMEMCPY_ERROR; cudaStatus = cudaMemcpy(dev_hist, hist, histSize * sizeof(int), cudaMemcpyHostToDevice); CHKMEMCPY_ERROR; cudaStatus = cudaMemset((void*)dev_threadedHist, 0, THREADS_PER_BLOCK * NO_BLOCKS * histSize * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!\n"); goto Error; } } // *** phase 1 *** // Launch a kernel on the GPU with one thread for every THREAD_BLOCK_SIZE elements. 
threadedHistKernel << <NO_BLOCKS, THREADS_PER_BLOCK >> >(dev_threadedHist, dev_arr, THREADS_PER_BLOCK, histSize, THREAD_BLOCK_SIZE); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "threadedHistKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaDeviceSynchronize(); CHKSYNC_ERROR; // *** phase 2 *** sumThreadedResultsKernel << <1, histSize >> >(dev_hist, dev_threadedHist, histSize, THREADS_PER_BLOCK * NO_BLOCKS); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "sumThreadedResultsKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaDeviceSynchronize(); CHKSYNC_ERROR; // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(hist, dev_hist, histSize * sizeof(int), cudaMemcpyDeviceToHost); CHKMEMCPY_ERROR; Error: cudaFree(dev_arr); cudaFree(dev_hist); cudaFree(dev_threadedHist); return cudaStatus; }
1b6af5d01dfb6d2d93c7f9fa33e20e46e7d7bbc2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************************** deldopoffs.c Takes the delay-correction polynomial for a delay-doppler set and figures out the COM delay and doppler corrections (in units of image rows and columns) for each frame. Modified 2015 June 3 by CM: Implement smearing for the "fit" and "write" actions Modified 2006 June 21 by CM: Changed delres to del_per_pixel and dopres to dop_per_pixel *****************************************************************************************/ extern "C" { #include "../shape/head.h" } __global__ void deldopoffs_krnl(struct dat_t *ddat, int s, int nframes) { /* nframes-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; int k, n; double del, dop, arg, x; if (f < nframes) { for (k=0; k<ddat->set[s].desc.deldop.nviews; k++) { x = 1.0; dop = 0.0; del = ddat->set[s].desc.deldop.delcor.a[0].val; arg = ddat->set[s].desc.deldop.frame[f].view[k].t - ddat->set[s].desc.deldop.delcor.t0; for (n=1; n<=ddat->set[s].desc.deldop.delcor.n; n++) { dop += n*ddat->set[s].desc.deldop.delcor.a[n].val*x; del += ddat->set[s].desc.deldop.delcor.a[n].val*(x*=arg); } /* del has units of usec */ ddat->set[s].desc.deldop.frame[f].view[k].deloff = del/ddat->set[s].desc.deldop.del_per_pixel; /* dop has units of usec/day and there are 86400 sec/day */ ddat->set[s].desc.deldop.frame[f].view[k].dopoff = -dop*ddat->set[s].desc.deldop.Ftx / (ddat->set[s].desc.deldop.dop_per_pixel*86400.0); } } } __host__ void deldopoffs_gpu(struct dat_t *ddat, int s, int nframes) { dim3 BLK,THD; /* Launch nframes-threaded kernel */ THD.x = nframes; hipLaunchKernelGGL(( deldopoffs_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, s, nframes); checkErrorAfterKernelLaunch("deldopoffs_cuda_krnl (deldopoffs_cuda)"); }
1b6af5d01dfb6d2d93c7f9fa33e20e46e7d7bbc2.cu
/***************************************************************************************** deldopoffs.c Takes the delay-correction polynomial for a delay-doppler set and figures out the COM delay and doppler corrections (in units of image rows and columns) for each frame. Modified 2015 June 3 by CM: Implement smearing for the "fit" and "write" actions Modified 2006 June 21 by CM: Changed delres to del_per_pixel and dopres to dop_per_pixel *****************************************************************************************/ extern "C" { #include "../shape/head.h" } __global__ void deldopoffs_krnl(struct dat_t *ddat, int s, int nframes) { /* nframes-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; int k, n; double del, dop, arg, x; if (f < nframes) { for (k=0; k<ddat->set[s].desc.deldop.nviews; k++) { x = 1.0; dop = 0.0; del = ddat->set[s].desc.deldop.delcor.a[0].val; arg = ddat->set[s].desc.deldop.frame[f].view[k].t - ddat->set[s].desc.deldop.delcor.t0; for (n=1; n<=ddat->set[s].desc.deldop.delcor.n; n++) { dop += n*ddat->set[s].desc.deldop.delcor.a[n].val*x; del += ddat->set[s].desc.deldop.delcor.a[n].val*(x*=arg); } /* del has units of usec */ ddat->set[s].desc.deldop.frame[f].view[k].deloff = del/ddat->set[s].desc.deldop.del_per_pixel; /* dop has units of usec/day and there are 86400 sec/day */ ddat->set[s].desc.deldop.frame[f].view[k].dopoff = -dop*ddat->set[s].desc.deldop.Ftx / (ddat->set[s].desc.deldop.dop_per_pixel*86400.0); } } } __host__ void deldopoffs_gpu(struct dat_t *ddat, int s, int nframes) { dim3 BLK,THD; /* Launch nframes-threaded kernel */ THD.x = nframes; deldopoffs_krnl<<<BLK,THD>>>(ddat, s, nframes); checkErrorAfterKernelLaunch("deldopoffs_cuda_krnl (deldopoffs_cuda)"); }
f1628857516d3b3621205edd84984767a8a6e7d3.hip
// !!! This is a file automatically generated by hipify!!! #include "stdio.h" #include "hip/hip_runtime.h" #include "matAddcuda.h" /** * CUDA Kernel Device code * Computes the matrix addition of a and b into c. */ __global__ void matrixAdd(float *a, float *b, float *c, int n2) { int x = blockIdx.x; int y = blockIdx.y; int i = (n2*y) + x; c[i] = a[i] + b[i]; }
f1628857516d3b3621205edd84984767a8a6e7d3.cu
#include "stdio.h" #include "cuda_runtime.h" #include "matAddcuda.h" /** * CUDA Kernel Device code * Computes the matrix addition of a and b into c. */ __global__ void matrixAdd(float *a, float *b, float *c, int n2) { int x = blockIdx.x; int y = blockIdx.y; int i = (n2*y) + x; c[i] = a[i] + b[i]; }
44b4eecbd7867a897840f73a9b758e28785eef3b.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" #include "matrixmul.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include "helper_cuda.h" #include "helper_string.h" #include <windows.h> #include<fstream> #include<iostream> #include<iomanip> using namespace std; #define PI 3.14159265 #define deltaT 0.0075 #define paraA 0.35 #define WIDTH 32 #define SIZE 1024 __constant__ float deltaX[1]; __constant__ float deltaY[1]; __global__ void periodicalize(Matrix in); __device__ float laplaceCal(float front, float back, float deltaX, float deltaY, float num); __device__ float frontCal(float *in); __device__ float backCal(float *in); __device__ float* getFOI(Matrix in, int i); __device__ float* foiPowOf3(float *foi); __device__ void getNowOi(Matrix out, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32, int i); __global__ void allCal(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32); __global__ void firstCal(Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi32); __device__ float laplaceCal_r(float *in, float deltaX, float deltaY, float num); __global__ void callThree(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32); __global__ void calTwo(Matrix tempRecordOi2, Matrix tempRecordOi4); __global__ void calOne(Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi32); __device__ float* getFOIf(float *in, int i, int width); __global__ void allCal_new(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { SYSTEMTIME sysstart; SYSTEMTIME sysstop; GetLocalTime(&sysstart); float oi0 
= 0.3; float qh = sqrt(3.0) / 2; int optionOi = 1; int areaX = 5; int areaY = 5; int numT = 10; float deltaX0 = PI / 4; float deltaY0 = PI / 4; int nucleusR = 11; float strainR = 1e-5 / deltaT; //printf("strainR%f", strainR); float totalT = numT*deltaT; int numX = areaX / 0.7850; int numY = areaY / 0.7850; //printf("numX: %d, numY:%d\n", numX, numY); Matrix oi_host = AllocateMatrix(numX + 2, numY + 2, oi0); Matrix axisX = AllocateMatrix((numX + 2), numY + 2, 1.0); Matrix axisY = AllocateMatrix((numY + 2), numX + 2, 1.0); Matrix temp_matrix = AllocateMatrix(numX + 2, numY + 2, 0.0); Matrix chooseX = AllocateMatrix(numX + 2, numY + 2, 0); Matrix nucleusOi = AllocateMatrix(numX + 2, numY + 2, 0); for (int i = 0; i < numX + 2; i++){ float temp = 0.7850*((i + 1) - numX / 2); for (int j = 0; j < numY + 2; j++){ axisX.elements[j*(numX + 2) + i] = temp; } } for (int i = numY + 2; i >= 1; i--){ float temp = 0.7850*(i - numY / 2); for (int j = 0; j < numX + 2; j++){ axisY.elements[(numY + 2 - i)*(numX + 2) + j] = temp; } } for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ temp_matrix.elements[i*(numX + 2) + j] = axisX.elements[i*(numX + 2) + j] * axisX.elements[i*(numX + 2) + j] + axisY.elements[i*(numX + 2) + j] * axisY.elements[i*(numX + 2) + j]; } } printf("\n***temp_matrix***\n"); //test temp_matrix /*for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ printf("%f\t", temp_matrix.elements[i*(numX + 2) + j]); } printf("\n"); }*/ for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ if (temp_matrix.elements[i*(numX + 2) + j] <= (nucleusR*nucleusR)){ chooseX.elements[i*(numX + 2) + j] = 1; } } } printf("\n***chooseX***\n"); //test chooseX /*for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ printf("%f\t", chooseX.elements[i*(numX + 2) + j]); } printf("\n"); }*/ //float At = -1*4 / 5 * (oi0 + sqrt(5 / 3 * paraA - 4 * oi0 *oi0)); float At = (oi0 + sqrt(5 / 3.0*paraA - 4 * oi0*oi0))*(-0.8); for 
(int i = 0; i < numX + 2; i++){ for (int j = 0; j < numY + 2; j++){ axisX.elements[i*(numX + 2) + j] = axisX.elements[i*(numX + 2) + j] * 0.9659 + axisY.elements[i*(numX + 2) + j] * 0.2588; } } for (int i = 0; i < numX + 2; i++){ for (int j = 0; j < numY + 2; j++){ axisY.elements[i*(numX + 2) + j] = -axisX.elements[i*(numX + 2) + j] * 0.2588 + axisY.elements[i*(numX + 2) + j] * 0.9659; } } printf("\n***axisX***\n"); //test axisX /* for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ printf("%f\t", axisX.elements[i*(numX + 2) + j]); } printf("\n"); } */ printf("\n***axisY***\n"); //test axisY /* for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ printf("%f\t", axisY.elements[i*(numX + 2) + j]); } printf("\n"); } */ for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ nucleusOi.elements[i*(numX + 2) + j] = At* (cos(qh*axisX.elements[i*(numX + 2) + j])*cos(1 / sqrt(3.0)*qh*axisY.elements[i*(numX + 2) + j]) + 0.5 * cos(2 / sqrt(3.0)*qh*axisY.elements[i*(numX + 2) + j])); } } printf("\n***nucleusOi***\n"); //test nucleusOi /*for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ printf("%f\t", nucleusOi.elements[i*(numX + 2) + j]); } printf("\n"); }*/ for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ oi_host.elements[i*(numX + 2) + j] += chooseX.elements[i*(numX + 2) + j] * nucleusOi.elements[i*(numX + 2) + j]; } } printf("\n***oi_host***\n"); //test oi_host /*for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ printf("%f\t", oi_host.elements[i*(numX + 2) + j]); } printf("\n"); }*/ Matrix newoi = AllocateDeviceMatrix(oi_host); //Matrix u = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); Matrix oi = AllocateDeviceMatrix(oi_host); Matrix tempRecordOi2 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0)); Matrix tempRecordOi4 = 
AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0)); Matrix tempRecordOi6 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0)); Matrix tempRecordOi32 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0)); for (int i = 0; i < numT; i++) { printf("round: %d\n", i); //printf("********oi before period************\n"); //update oi_host //periodicalize x for (int j = 0; j < numX + 2; j++){ oi_host.elements[0 * (numX + 2) + j] = oi_host.elements[numY*(numX + 2) + j]; } for (int j = 0; j < numX + 2; j++){ oi_host.elements[(numY + 1)*(numX + 2) + j] = oi_host.elements[1 * (numX + 2) + j]; } //periodicalize y for (int j = 0; j < numY + 2; j++){ oi_host.elements[j*(numX + 2) + 0] = oi_host.elements[j*(numX + 2) + numX]; } for (int j = 0; j < numY + 2; j++){ oi_host.elements[j*(numX + 2) + numX + 1] = oi_host.elements[j*(numX + 2) + 1]; } //test after periodicalize /*printf("oi after periodicalize!\n"); for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ printf("%f\t", oi_host.elements[i*(numX + 2) + j]); } printf("\n"); }*/ //Matrix oi = AllocateDeviceMatrix(oi_host); /*Matrix tempRecordOi2 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0)); Matrix tempRecordOi4 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0)); Matrix tempRecordOi6 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0)); Matrix tempRecordOi32 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0)); */ float tempx[1] = { deltaX0 + i*0.00005 }; float tempy[1] = { deltaY0*(deltaX0 / tempx[0]) }; hipMemcpyToSymbol(deltaX, &tempx, sizeof(float)); hipMemcpyToSymbol(deltaY, &tempy, sizeof(float)); //hipDeviceSynchronize(); /* hipMemcpy(&0.785, tempx, sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&0.785, tempy, sizeof(float), hipMemcpyDeviceToHost); printf("After tempx:%f\n", *tempx); printf("After tempy:%f\n", *tempy); */ //free(tempx); //free(tempy); CopyToDeviceMatrix(oi, oi_host); //CopyFromDeviceMatrix(oi_host, oi); 
/*printf("********oi before calculation:************\n"); for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ printf("%f\t", oi_host.elements[i*(numX + 2) + j]); } printf("\n"); }*/ ////////////////////////////////////////////////////////// dim3 block_size(WIDTH, WIDTH); int grid_rows = oi.height / WIDTH + (oi.height % WIDTH ? 1 : 0); int grid_cols = oi.width / WIDTH + (oi.width % WIDTH ? 1 : 0); dim3 grid_size(grid_cols, grid_rows); hipLaunchKernelGGL(( allCal), dim3(1), dim3(64) , 0, 0, newoi, oi, tempRecordOi2, tempRecordOi4, tempRecordOi6, tempRecordOi32); //allCal << < 1, SIZE >> >(newoi, oi, tempRecordOi2, tempRecordOi4, tempRecordOi6, tempRecordOi32); //allCal_new << < 1, SIZE >> >(newoi, oi, tempRecordOi2, tempRecordOi4, tempRecordOi6, tempRecordOi32); //hipDeviceSynchronize(); //firstCal << < 1, 64 >> >(oi, tempRecordOi2, tempRecordOi32); /*calOne << <64, SIZE >> >(oi, tempRecordOi2, tempRecordOi32); hipDeviceSynchronize(); calTwo << <64, SIZE >> >(tempRecordOi2, tempRecordOi4); hipDeviceSynchronize(); callThree << <64, SIZE >> >(newoi, oi, tempRecordOi2, tempRecordOi4, tempRecordOi6, tempRecordOi32);*/ hipDeviceSynchronize(); Check_CUDA_Error("Kernel Execution Failed!"); ///////////////////////////////////////////////////////// CopyFromDeviceMatrix(oi_host, newoi); //CopyFromDeviceMatrix(oi_host, tempRecordOi2); /*printf("********oi after calculation:************\n"); for (int i = 0; i < numY + 2; i++){ for (int j = 0; j < numX + 2; j++){ printf("%f\t", oi_host.elements[i*(numX + 2) + j]); } printf("\n"); }*/ /*if (i == numT - 1){ printf("Free device matrix!\n"); FreeDeviceMatrix(&tempRecordOi2); FreeDeviceMatrix(&tempRecordOi4); FreeDeviceMatrix(&tempRecordOi6); FreeDeviceMatrix(&tempRecordOi32); FreeDeviceMatrix(&oi); FreeDeviceMatrix(&newoi); } */ } //hipEventRecord(stop, 0); //hipEventSynchronize(stop); //float elapsedTime; //hipEventElapsedTime(&elapsedTime, start, stop); GetLocalTime(&sysstop); for (int i = 0; i < numY + 2; i++){ 
for (int j = 0; j < numX + 2; j++){ printf("%f\t", oi_host.elements[i*(numX + 2) + j]); } printf("\n"); } // printf("Processing Time: %3.1f ms \n", elapsedTime); printf("Processing Time: %d ms \n", (sysstop.wMilliseconds + sysstop.wSecond * 1000 + sysstop.wMinute * 60000) - (sysstart.wMilliseconds + sysstart.wSecond * 1000 + sysstart.wMinute * 60000)); //ofstream ofile; // //ofile.open("d:\\myfile.txt"); // //for (int i = 0; numY + 2; i++){} // for (int j = 0; j < numX + 2; j++){ // ofile << oi_host.elements[i*(numX + 2) + j] << "\t"; // // } // ofile << endl; //} //ofile.close(); // /* printf("Free host matrix!"); FreeMatrix(&axisX); FreeMatrix(&axisY); FreeMatrix(&temp_matrix); FreeMatrix(&chooseX); FreeMatrix(&nucleusOi); */ return 0; } __global__ void periodicalize(Matrix in) { int i = blockIdx.x * blockDim.x + threadIdx.x; float frontRow1 = in.elements[0 + i]; float frontRow2 = in.elements[in.width + i]; float backRow1 = in.elements[in.width * (in.height - 2) + i]; float backRow2 = in.elements[in.width * (in.height - 1) + i]; float leftCol1 = in.elements[in.width * i + 0]; float leftCol2 = in.elements[in.width * i + 1]; float rightCol1 = in.elements[in.width * i + in.width - 2]; float rightCol2 = in.elements[in.width * i + in.width - 1]; in.elements[0 + i] = backRow1; in.elements[in.width + i] = backRow2; in.elements[in.width * (in.height - 2) + i] = frontRow1; in.elements[in.width * (in.height - 1) + i] = frontRow2; in.elements[in.width * i + 0] = rightCol1; in.elements[in.width * i + 1] = rightCol2; in.elements[in.width * i + in.width - 2] = leftCol1; in.elements[in.width * i + in.width - 1] = leftCol2; } __device__ float laplaceCal(float front, float back, float deltaX, float deltaY, float num) { //printf("front:%f, back:%f", front, back); float res = (front / powf(0.785, num)) + (back / powf(0.785, num)); //printf("laplace res: %f", res); return res; } __device__ float laplaceCal_r(float *in, float deltaX, float deltaY, float num){ float res = ((0.125 * 
(in[2 * 3 + 2] + in[0 * 3 + 2] + in[2 * 3 + 0] + in[0 * 3 + 0]) + 0.75 * (in[2 * 3 + 1] + in[0 * 3 + 1]) - 0.25 * (in[2 * 3 + 1] + in[0 * 3 + 1]) - 1.5 * in[1 * 3 + 1])) / powf(deltaX, num) + (0.125*(in[2 * 3 + 2] + in[0*3+2]+in[2*3+0]+in[0*3+0])+ 0.75*(in[1*3+2]+in[1*3+0])-0.25*(in[1*3+2]+in[1*3+0])-1.5*in[1*3+1])/powf(deltaY,num); //printf("new laplace: %f", res); //printf("deltaX: %f ", deltaX); //printf("deltaY: %f ", deltaY); return res; } __device__ float frontCal(float *in) { float res = 0.125 * (in[2*3 + 2] + in[0*3 + 2] + in[2*3 + 0]+in[0*3+0]) + 0.75 * (in[2*3 + 1] + in[0*3 + 1]) - 0.25 * (in[2*3 + 1] + in[0*3 + 1]) - 1.5 * in[1*3 + 1]; //printf("front:%f\t", res); } __device__ float backCal(float *in) { float res = 0.125 * (in[2 * 3 + 2] + in[0 * 3 + 2] + in[2 * 3 + 0] + in[0 * 3 + 0]) + 0.75 * (in[1*3 + 2] + in[1*3 + 0]) - 0.25 * (in[1*3 + 2] + in[1*3 + 0]) - 1.5 * in[1*3 + 1]; //printf("back:%f\t", res); } __device__ float* getFOI(Matrix in, int i) { //how about using shared mem? //printf("i: %d\n", i); float foi[9]; foi[0 + 0] = in.elements[i - in.width - 1]; foi[0 + 1] = in.elements[i - in.width]; foi[0 + 2] = in.elements[i - in.width + 1]; foi[1*3 + 0] = in.elements[i - 1]; foi[1*3+ 1] = in.elements[i]; foi[1*3+ 2] = in.elements[i + 1]; foi[2*3 + 0] = in.elements[i + in.width - 1]; foi[2*3 + 1] = in.elements[i + in.width]; foi[2*3 + 2] = in.elements[i + in.width + 1]; return foi; } __device__ float* getFOIf(float *in, int i, int width) { //how about using shared mem? 
//printf("i: %d\n", i); float foi[9]; foi[0 + 0] = in[i - width - 1]; foi[0 + 1] = in[i - width]; foi[0 + 2] = in[i - width + 1]; foi[1 * 3 + 0] = in[i - 1]; foi[1 * 3 + 1] = in[i]; foi[1 * 3 + 2] = in[i + 1]; foi[2 * 3 + 0] = in[i + width - 1]; foi[2 * 3 + 1] = in[i + width]; foi[2 * 3 + 2] = in[i + width + 1]; return foi; } __device__ float* foiPowOf3(float *foi) { float threefoi[9]; //float foithree[9]; /* foithree[0 + 0] = powf(foi[0], 3.0); foithree[0 + 1] = powf(foi[1], 3.0); foithree[0 + 2] = powf(foi[2], 3.0); foithree[1 * 3 + 0] = powf(foi[3], 3.0); foithree[1 * 3 + 1] = powf(foi[4], 3.0); foithree[1 * 3 + 2] = powf(foi[5], 3.0); foithree[2 * 3 + 0] = powf(foi[6], 3.0); foithree[2 * 3 + 1] = powf(foi[7], 3.0); foithree[2 * 3 + 2] = powf(foi[8], 3.0); */ threefoi[0] = foi[0] * foi[0] * foi[0]; threefoi[1] = foi[1] * foi[1] * foi[1]; threefoi[2] = foi[2] * foi[2] * foi[2]; threefoi[3] = foi[3] * foi[3] * foi[3]; threefoi[4] = foi[4] * foi[4] * foi[4]; threefoi[5] = foi[5] * foi[5] * foi[5]; threefoi[6] = foi[6] * foi[6] * foi[6]; threefoi[7] = foi[7] * foi[7] * foi[7]; threefoi[8] = foi[8] * foi[8] * foi[8]; return threefoi; } __device__ void getNowOi(Matrix out, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32, int i) { out.elements[i] = oi.elements[i] + deltaT * ((1 - paraA) * tempRecordOi2.elements[i] + 2 * tempRecordOi4.elements[i] + tempRecordOi6.elements[i] + tempRecordOi32.elements[i]); } __global__ void allCal(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32) { //consider using tile? // int col = blockIdx.x * blockDim.x + threadIdx.x; // int row = blockIdx.y * blockDim.y + threadIdx.y; //use col and row represent i? 
int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; //int blockId = blockIdx.x + blockIdx.y * gridDim.x; //int i = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; //int i = blockIdx.x *blockDim.x + threadIdx.x; //printf("i: %d\t", i); float x = deltaX[0]; float y = deltaY[0]; newOi.elements[i] = oi.elements[i]; //float *foi = getFOI(oi, i); float *in = getFOI(oi, i); __syncthreads(); /*if (i == 9){ printf("oi:\n"); printf("%f %f %f\n", in[0], in[1], in[2]); printf("%f %f %f\n", in[3], in[4], in[5]); printf("%f %f %f\n", in[6], in[7], in[8]); } */ if (!(i%oi.width == 0 || i%oi.width == (oi.width - 1) || (i >= 0 && i <= (oi.width - 1)) || (i <= (oi.height*oi.width - 1) && i >= (oi.height - 1)*oi.width) || i >= oi.height*oi.width)) { //tempOi2 = laplaceCal_r(foi, x, y, 2.0); float tempOi2 = ((0.125 * (in[8] + in[2] + in[6] + in[0]) + 0.75 * (in[7] + in[1]) - 0.25 * (in[7] + in[1]) - 1.5 * in[4]) / (x*x)) + ((0.125*(in[8] + in[2] + in[6] + in[0]) + 0.75*(in[5] + in[3]) - 0.25*(in[5] + in[3]) - 1.5*in[4]) / (y*y)); tempRecordOi2.elements[i] = tempOi2; __syncthreads(); if (i == 9){ printf("tempoi2:\n"); for (int j = 0; j < 64; j++){ printf("%f\t", tempRecordOi2.elements[j]); if (j % 8 == 7){ printf("\n"); } } } //float *threefoi = foiPowOf3(foi); //tempOi32 = laplaceCal_r(threefoi, x, y, 2.0); float tempOi32 = ((0.125 * (in[8] * in[8] * in[8] + in[2] * in[2] * in[2] + in[6] * in[6] * in[6] + in[0] * in[0] * in[0]) + 0.75 * (in[7] * in[7] * in[7] + in[1] * in[1] * in[1]) - 0.25 * (in[7] * in[7] * in[7] + in[1] * in[1] * in[1]) - 1.5 * in[4] * in[4] * in[4]) / (x*x)) + ((0.125*(in[8] * in[8] * in[8] + in[2] * in[2] * in[2] + in[6] * in[6] * in[6] + in[0] * in[0] * in[0]) + 0.75*(in[5] * in[5] * in[5] + in[3] * in[3] * in[3]) - 0.25*(in[5] * in[5] * in[5] + in[3] * in[3] * in[3]) - 1.5*in[4] * in[4] * in[4]) / (y*y)); tempRecordOi32.elements[i] = tempOi32; __syncthreads(); if (i == 
9){ printf("tempoi32:\n"); for (int j = 0; j < 64; j++){ printf("%f\t", tempRecordOi32.elements[j]); if (j % 8 == 7){ printf("\n"); } } } //float *twofoi = getFOI(tempRecordOi2, i); float *tin = getFOI(tempRecordOi2, i); //float tempOi4 = laplaceCal_r(twofoi, x, y, 2.0); float tempOi4 = ((0.125 * (tin[8] + tin[2] + tin[6] + tin[0]) + 0.75 * (tin[7] + tin[1]) - 0.25 * (tin[7] + tin[1]) - 1.5 * tin[4]) / (x*x)) + ((0.125*(tin[8] + tin[2] + tin[6] + tin[0]) + 0.75*(tin[5] + tin[3]) - 0.25*(tin[5] + tin[3]) - 1.5*tin[4]) / (y*y)); tempRecordOi4.elements[i] = tempOi4; __syncthreads(); if (i == 9){ printf("tempoi4:\n"); for (int j = 0; j < 64; j++){ printf("%f\t", tempRecordOi4.elements[j]); if (j % 8 == 7){ printf("\n"); } } } /*if (i == 9){ printf("tempoi2:\n"); printf("%f %f %f\n", twofoi[0], twofoi[1], twofoi[2]); printf("%f %f %f\n", twofoi[3], twofoi[4], twofoi[5]); printf("%f %f %f\n", twofoi[6], twofoi[7], twofoi[8]); } */ //float *fourfoi = getFOI(tempRecordOi4, i); float *fin = getFOI(tempRecordOi4, i); //float tempOi6 = laplaceCal_r(fourfoi, x, y, 2.0); float tempOi6 = ((0.125 * (fin[8] + fin[2] + fin[6] + fin[0]) + 0.75 * (fin[7] + fin[1]) - 0.25 * (fin[7] + fin[1]) - 1.5 * fin[4]) / (x*x)) + ((0.125*(fin[8] + fin[2] + fin[6] + fin[0]) + 0.75*(fin[5] + fin[3]) - 0.25*(fin[5] + fin[3]) - 1.5*fin[4]) / (y*y)); tempRecordOi6.elements[i] = tempOi6; if (i == 9){ printf("tempoi6:\n"); for (int j = 0; j < 64; j++){ printf("%f\t", tempRecordOi6.elements[j]); if (j % 8 == 7){ printf("\n"); } } } newOi.elements[i] = oi.elements[i] + deltaT * ((1 - paraA) * tempOi2 + 2 * tempOi4 + tempOi6 + tempOi32); } } __global__ void allCal_new(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32){ int i = blockIdx.x *blockDim.x + threadIdx.x; float x = deltaX[0]; float y = deltaY[0]; __shared__ float temp2[SIZE]; temp2[i] = oi.elements[i]; __syncthreads(); if (!(i%oi.width == 0 || i%oi.width == (oi.width - 1) || (i >= 0 && 
i <= (oi.width - 1)) || (i <= (oi.height*oi.width - 1) && i >= (oi.height - 1)*oi.width) || i >= oi.height*oi.width)) { float *in = getFOIf(temp2, i, oi.width); float tempOi2 = ((0.125 * (in[8] + in[2] + in[6] + in[0]) + 0.75 * (in[7] + in[1]) - 0.25 * (in[7] + in[1]) - 1.5 * in[4]) / (x*x)) + ((0.125*(in[8] + in[2] + in[6] + in[0]) + 0.75*(in[5] + in[3]) - 0.25*(in[5] + in[3]) - 1.5*in[4]) / (y*y)); tempRecordOi2.elements[i] = tempOi2; float tempOi32 = ((0.125 * (in[8] * in[8] * in[8] + in[2] * in[2] * in[2] + in[6] * in[6] * in[6] + in[0] * in[0] * in[0]) + 0.75 * (in[7] * in[7] * in[7] + in[1] * in[1] * in[1]) - 0.25 * (in[7] * in[7] * in[7] + in[1] * in[1] * in[1]) - 1.5 * in[4] * in[4] * in[4]) / (x*x)) + ((0.125*(in[8] * in[8] * in[8] + in[2] * in[2] * in[2] + in[6] * in[6] * in[6] + in[0] * in[0] * in[0]) + 0.75*(in[5] * in[5] * in[5] + in[3] * in[3] * in[3]) - 0.25*(in[5] * in[5] * in[5] + in[3] * in[3] * in[3]) - 1.5*in[4] * in[4] * in[4]) / (y*y)); tempRecordOi32.elements[i] = tempOi32; __syncthreads(); } __shared__ float temp4[SIZE]; temp4[i] = tempRecordOi2.elements[i]; __syncthreads(); if (!(i%tempRecordOi2.width == 0 || i%tempRecordOi2.width == (tempRecordOi2.width - 1) || (i >= 0 && i <= (tempRecordOi2.width - 1)) || (i <= (tempRecordOi2.height*tempRecordOi2.width - 1) && i >= (tempRecordOi2.height - 1)*tempRecordOi2.width) || i >= tempRecordOi2.height*tempRecordOi2.width)) { float *in = getFOIf(temp4, i, tempRecordOi2.width); float tempOi4 = ((0.125 * (in[8] + in[2] + in[6] + in[0]) + 0.75 * (in[7] + in[1]) - 0.25 * (in[7] + in[1]) - 1.5 * in[4]) / (x*x)) + ((0.125*(in[8] + in[2] + in[6] + in[0]) + 0.75*(in[5] + in[3]) - 0.25*(in[5] + in[3]) - 1.5*in[4]) / (y*y)); tempRecordOi4.elements[i] = tempOi4; __syncthreads(); } newOi.elements[i] = oi.elements[i]; __shared__ float temp6[SIZE]; temp6[i] = tempRecordOi4.elements[i]; __syncthreads(); if (!(i%oi.width == 0 || i%oi.width == (oi.width - 1) || (i >= 0 && i <= (oi.width - 1)) || (i <= 
(oi.height*oi.width - 1) && i >= (oi.height - 1)*oi.width) || i >= oi.height*oi.width)) { float x = deltaX[0]; float y = deltaY[0]; float *in = getFOIf(temp6, i, oi.width); float tempOi6 = ((0.125 * (in[8] + in[2] + in[6] + in[0]) + 0.75 * (in[7] + in[1]) - 0.25 * (in[7] + in[1]) - 1.5 * in[4]) / (x*x)) + ((0.125*(in[8] + in[2] + in[6] + in[0]) + 0.75*(in[5] + in[3]) - 0.25*(in[5] + in[3]) - 1.5*in[4]) / (y*y)); tempRecordOi6.elements[i] = tempOi6; __syncthreads(); newOi.elements[i] = oi.elements[i] + deltaT * ((1 - paraA) * tempRecordOi2.elements[i] + 2 * tempRecordOi4.elements[i] + tempOi6 + tempRecordOi32.elements[i]); } } __global__ void firstCal(Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi32){ //consider using tile? //int col = blockIdx.x * blockDim.x + threadIdx.x; //int row = blockIdx.y * blockDim.y + threadIdx.y; //use col and row represent i? //int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; int i = blockIdx.x *blockDim.x + threadIdx.x; if (!(i%oi.width == 0 || i%oi.width == (oi.width - 1) || (i >= 0 && i <= (oi.width - 1)) || (i <= (oi.height*oi.width - 1) && i >= (oi.height - 1)*oi.width)||i>=oi.height*oi.width)){ float *foi = getFOI(oi, i); tempRecordOi2.elements[i] = laplaceCal(frontCal(foi), backCal(foi), 0.785, 0.785, 2.0); float *foithree = foiPowOf3(foi); tempRecordOi32.elements[i] = laplaceCal(frontCal(foithree), backCal(foithree), 0.785, 0.785, 2.0); } } __global__ void calOne(Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi32) { int i = blockIdx.x *blockDim.x + threadIdx.x; /*if (i == 9){ printf("oi!\n"); for (int i = 0; i < 8; i++){ for (int j = 0; j < 8; j++){ printf("%f\t", oi.elements[i*(8) + j]); } printf("\n"); } }*/ __shared__ float temp2[SIZE]; temp2[i] = oi.elements[i]; __syncthreads(); if (!(i%oi.width == 0 || i%oi.width == (oi.width - 1) || (i >= 0 && i <= (oi.width - 1)) || (i <= (oi.height*oi.width - 1) && i >= (oi.height - 1)*oi.width) || i >= 
oi.height*oi.width)) { /*if (i == 9){ printf("temp2 print!\n"); for (int i = 0; i < 64; i++){ printf("%f\t", temp2[i]); if (i % 8 == 7) printf("\n"); } } */ float *in = getFOIf(temp2, i, oi.width); __syncthreads(); /* if (i == 9){ printf("i:%d check:%f \n", i,in[0]); printf("i:%d check:%f \n", i, in[1]); printf("i:%d check:%f \n", i, in[2]); printf("i:%d check:%f \n", i, in[3]); printf("i:%d check:%f \n", i, in[4]); printf("i:%d check:%f \n", i, in[5]); printf("i:%d check:%f \n", i, in[6]); printf("i:%d check:%f \n", i, in[7]); printf("i:%d check:%f \n", i, in[8]); }*/ /* printf("************getFOI***************\n"); for (int i = 0; i < 9; i++){ printf("foi[%d]:%f\t", i, foi[i]); }*/ float x = deltaX[0]; float y = deltaY[0]; /*if (i == 9){ printf("x:%f, y:%f \n",x,y); }*/ //printf("deltaX=%f\tdeltaY=%f\n", deltaX, deltaY); //tempRecordOi2.elements[i] = laplaceCal_r(foi, x, y, 2.0); float tempOi2 = ((0.125 * (in[8] + in[2] + in[6] + in[0]) + 0.75 * (in[7] + in[1]) - 0.25 * (in[7] + in[1]) - 1.5 * in[4]) / (x*x)) + ((0.125*(in[8] + in[2] + in[6] + in[0]) + 0.75*(in[5] + in[3]) - 0.25*(in[5] + in[3]) - 1.5*in[4]) / (y*y)); tempRecordOi2.elements[i] = tempOi2; __syncthreads(); //printf("tempOi2:%f\t", tempOi2); //float *threefoi = foiPowOf3(in); //printf("in: %f, oi3:%f", in[5], threefoi[5]); //float tempOi32 = laplaceCal(frontCal(foi3), backCal(foi3), 0.785, 0.785, 2.0); //float tempOi32 = laplaceCal_r(threefoi, x, y, 2.0); float tempOi32 = ((0.125 * (in[8] * in[8] * in[8] + in[2] * in[2] * in[2] + in[6] * in[6] * in[6] + in[0] * in[0] * in[0]) + 0.75 * (in[7] * in[7] * in[7] + in[1] * in[1] * in[1]) - 0.25 * (in[7] * in[7] * in[7] + in[1] * in[1] * in[1]) - 1.5 * in[4] * in[4] * in[4]) / (x*x)) + ((0.125*(in[8] * in[8] * in[8] + in[2] * in[2] * in[2] + in[6] * in[6] * in[6] + in[0] * in[0] * in[0]) + 0.75*(in[5] * in[5] * in[5] + in[3] * in[3] * in[3]) - 0.25*(in[5] * in[5] * in[5] + in[3] * in[3] * in[3]) - 1.5*in[4] * in[4]*in[4]) / (y*y)); 
tempRecordOi32.elements[i] = tempOi32; } } __global__ void calTwo(Matrix tempRecordOi2, Matrix tempRecordOi4) { int i = blockIdx.x *blockDim.x + threadIdx.x; __shared__ float temp4[SIZE]; temp4[i] = tempRecordOi2.elements[i]; __syncthreads(); if (!(i%tempRecordOi2.width == 0 || i%tempRecordOi2.width == (tempRecordOi2.width - 1) || (i >= 0 && i <= (tempRecordOi2.width - 1)) || (i <= (tempRecordOi2.height*tempRecordOi2.width - 1) && i >= (tempRecordOi2.height - 1)*tempRecordOi2.width) || i >= tempRecordOi2.height*tempRecordOi2.width)) { float x = deltaX[0]; float y = deltaY[0]; //float *twofoi = getFOI(tempRecordOi2, i); //float tempOi4 = laplaceCal(frontCal(foi2), backCal(foi2), 0.785, 0.785, 2); float *in = getFOIf(temp4, i, tempRecordOi2.width); float tempOi4 = ((0.125 * (in[8] + in[2] + in[6] + in[0]) + 0.75 * (in[7] + in[1]) - 0.25 * (in[7] + in[1]) - 1.5 * in[4]) / (x*x)) + ((0.125*(in[8] + in[2] + in[6] + in[0]) + 0.75*(in[5] + in[3]) - 0.25*(in[5] + in[3]) - 1.5*in[4]) / (y*y)); tempRecordOi4.elements[i] = tempOi4; __syncthreads(); //float tempOi4 = laplaceCal_r(twofoi, x, y, 2.0); //tempRecordOi4.elements[i] = tempOi4; //printf("tempOi4:%f\t", tempOi4); } } __global__ void callThree(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32) { int i = blockIdx.x *blockDim.x + threadIdx.x; //printf("i: %d\t", i); newOi.elements[i] = oi.elements[i]; __shared__ float temp6[SIZE]; temp6[i] = tempRecordOi4.elements[i]; __syncthreads(); if (!(i%oi.width == 0 || i%oi.width == (oi.width - 1) || (i >= 0 && i <= (oi.width - 1)) || (i <= (oi.height*oi.width - 1) && i >= (oi.height - 1)*oi.width) || i >= oi.height*oi.width)) { float x = deltaX[0]; float y = deltaY[0]; //float *fourfoi = getFOI(tempRecordOi4, i); //float tempOi6 = laplaceCal(frontCal(foi4), backCal(foi4), 0.785, 0.785, 2); //float tempOi6 = laplaceCal_r(fourfoi, x, y, 2.0); //printf("tempOi6:%f\t", tempOi6); //tempRecordOi6.elements[i] = tempOi6; 
float *in = getFOIf(temp6, i, oi.width); float tempOi6 = ((0.125 * (in[8] + in[2] + in[6] + in[0]) + 0.75 * (in[7] + in[1]) - 0.25 * (in[7] + in[1]) - 1.5 * in[4]) / (x*x)) + ((0.125*(in[8] + in[2] + in[6] + in[0]) + 0.75*(in[5] + in[3]) - 0.25*(in[5] + in[3]) - 1.5*in[4]) / (y*y)); tempRecordOi6.elements[i] = tempOi6; __syncthreads(); newOi.elements[i] = oi.elements[i] + deltaT * ((1 - paraA) * tempRecordOi2.elements[i] + 2 * tempRecordOi4.elements[i] + tempOi6 + tempRecordOi32.elements[i]); } }
44b4eecbd7867a897840f73a9b758e28785eef3b.cu
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "matrixmul.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "helper_cuda.h"
#include "helper_string.h"
#include <windows.h>
#include <fstream>
#include <iostream>
#include <iomanip>
using namespace std;

// Phase-field-crystal style explicit time integrator on a small 2D grid with
// a one-cell periodic halo.  Each time step evaluates nested 9-point
// Laplacians of the order parameter oi and advances it by
//   oi += deltaT * ((1-paraA)*Lap(oi) + 2*Lap^2(oi) + Lap^3(oi) + Lap(oi^3))
//
// Fixes relative to the previous revision:
//  * frontCal/backCal were declared to return float but had no return
//    statement (undefined behaviour) -- returns added.
//  * getFOI/getFOIf/foiPowOf3 returned pointers to function-local arrays
//    (undefined behaviour); kernels now use the out-parameter helper
//    loadFOI instead.  The old helpers are kept for interface compatibility
//    but are no longer called.
//  * __syncthreads() was executed inside interior-only (divergent) branches
//    in every kernel (undefined behaviour) -- barriers hoisted so all
//    threads of the block reach them.
//  * allCal wrote newOi.elements[i] and read 3x3 neighbourhoods before any
//    bounds check -- guarded.
//  * laplaceCal ignored its deltaX/deltaY parameters (hard-coded 0.785);
//    all existing call sites pass 0.785, so results are unchanged.
//  * Device/host matrices are now freed; debug dumps (if (i == 9) printf...)
//    and large commented-out blocks removed.

#define PI 3.14159265
#define deltaT 0.0075   // time-step size
#define paraA 0.35      // model parameter a
#define WIDTH 32        // 2D thread-block edge used by allCal's launch math
#define SIZE 1024       // shared-memory staging capacity (max threads/block)

// Grid spacings, written from the host once per time step (applied strain).
__constant__ float deltaX[1];
__constant__ float deltaY[1];

/* original interface (preserved) */
__global__ void periodicalize(Matrix in);
__device__ float laplaceCal(float front, float back, float deltaX, float deltaY, float num);
__device__ float frontCal(float *in);
__device__ float backCal(float *in);
__device__ float* getFOI(Matrix in, int i);
__device__ float* foiPowOf3(float *foi);
__device__ void getNowOi(Matrix out, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32, int i);
__global__ void allCal(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32);
__global__ void firstCal(Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi32);
__device__ float laplaceCal_r(float *in, float deltaX, float deltaY, float num);
__global__ void callThree(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32);
__global__ void calTwo(Matrix tempRecordOi2, Matrix tempRecordOi4);
__global__ void calOne(Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi32);
__device__ float* getFOIf(float *in, int i, int width);
__global__ void allCal_new(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32);

/* new, safe helpers */
__device__ void loadFOI(const float *in, int i, int width, float foi[9]);
__device__ bool isInterior(int i, int width, int height);
__device__ float laplace9(const float foi[9], float x, float y);
__device__ float laplace9Cubed(const float foi[9], float x, float y);

// Simple element-wise vector add (demo/left-over kernel; not used by main).
__global__ void addKernel(int *c, const int *a, const int *b)
{
	int i = threadIdx.x;
	c[i] = a[i] + b[i];
}

int main()
{
	SYSTEMTIME sysstart;
	SYSTEMTIME sysstop;
	GetLocalTime(&sysstart);

	// Physical / numerical parameters.
	float oi0 = 0.3;               // uniform background order parameter
	float qh = sqrt(3.0) / 2;      // lattice wave number of the one-mode ansatz
	int areaX = 5;                 // domain extent in x (lattice units)
	int areaY = 5;                 // domain extent in y
	int numT = 10;                 // number of time steps
	float deltaX0 = PI / 4;        // unstrained grid spacing, x
	float deltaY0 = PI / 4;        // unstrained grid spacing, y
	int nucleusR = 11;             // seed-nucleus radius (grid units, squared test below)
	int numX = areaX / 0.7850;     // interior grid points in x
	int numY = areaY / 0.7850;     // interior grid points in y

	// Field (with 1-cell halo) and scratch grids for building the seed.
	Matrix oi_host = AllocateMatrix(numX + 2, numY + 2, oi0);
	Matrix axisX = AllocateMatrix((numX + 2), numY + 2, 1.0);
	Matrix axisY = AllocateMatrix((numY + 2), numX + 2, 1.0);
	Matrix temp_matrix = AllocateMatrix(numX + 2, numY + 2, 0.0);
	Matrix chooseX = AllocateMatrix(numX + 2, numY + 2, 0);
	Matrix nucleusOi = AllocateMatrix(numX + 2, numY + 2, 0);

	// Coordinate grids centred on the domain.
	for (int i = 0; i < numX + 2; i++) {
		float temp = 0.7850*((i + 1) - numX / 2);
		for (int j = 0; j < numY + 2; j++) {
			axisX.elements[j*(numX + 2) + i] = temp;
		}
	}
	for (int i = numY + 2; i >= 1; i--) {
		float temp = 0.7850*(i - numY / 2);
		for (int j = 0; j < numX + 2; j++) {
			axisY.elements[(numY + 2 - i)*(numX + 2) + j] = temp;
		}
	}

	// Squared distance from the domain centre.
	for (int i = 0; i < numY + 2; i++) {
		for (int j = 0; j < numX + 2; j++) {
			temp_matrix.elements[i*(numX + 2) + j] =
				axisX.elements[i*(numX + 2) + j] * axisX.elements[i*(numX + 2) + j] +
				axisY.elements[i*(numX + 2) + j] * axisY.elements[i*(numX + 2) + j];
		}
	}
	printf("\n***temp_matrix***\n");

	// Seed mask: 1 inside the circular nucleus, 0 outside.
	for (int i = 0; i < numY + 2; i++) {
		for (int j = 0; j < numX + 2; j++) {
			if (temp_matrix.elements[i*(numX + 2) + j] <= (nucleusR*nucleusR)) {
				chooseX.elements[i*(numX + 2) + j] = 1;
			}
		}
	}
	printf("\n***chooseX***\n");

	// Amplitude of the one-mode approximation inside the nucleus.
	float At = (oi0 + sqrt(5 / 3.0*paraA - 4 * oi0*oi0))*(-0.8);

	// Rotate the coordinate frame by ~15 deg (cos15 = .9659, sin15 = .2588).
	// NOTE(review): axisX is overwritten first and the already-rotated axisX
	// feeds the axisY update, so this is not a pure rotation of the original
	// frame -- preserved as-is, confirm against the intended model.
	// NOTE(review): these loops use numX for rows and numY for columns,
	// opposite to the loops above; safe only because numX == numY here.
	for (int i = 0; i < numX + 2; i++) {
		for (int j = 0; j < numY + 2; j++) {
			axisX.elements[i*(numX + 2) + j] = axisX.elements[i*(numX + 2) + j] * 0.9659 + axisY.elements[i*(numX + 2) + j] * 0.2588;
		}
	}
	for (int i = 0; i < numX + 2; i++) {
		for (int j = 0; j < numY + 2; j++) {
			axisY.elements[i*(numX + 2) + j] = -axisX.elements[i*(numX + 2) + j] * 0.2588 + axisY.elements[i*(numX + 2) + j] * 0.9659;
		}
	}
	printf("\n***axisX***\n");
	printf("\n***axisY***\n");

	// One-mode triangular-lattice profile for the seed nucleus.
	for (int i = 0; i < numY + 2; i++) {
		for (int j = 0; j < numX + 2; j++) {
			nucleusOi.elements[i*(numX + 2) + j] = At* (cos(qh*axisX.elements[i*(numX + 2) + j])*cos(1 / sqrt(3.0)*qh*axisY.elements[i*(numX + 2) + j]) + 0.5 * cos(2 / sqrt(3.0)*qh*axisY.elements[i*(numX + 2) + j]));
		}
	}
	printf("\n***nucleusOi***\n");

	// Superimpose the nucleus onto the uniform background inside the mask.
	for (int i = 0; i < numY + 2; i++) {
		for (int j = 0; j < numX + 2; j++) {
			oi_host.elements[i*(numX + 2) + j] += chooseX.elements[i*(numX + 2) + j] * nucleusOi.elements[i*(numX + 2) + j];
		}
	}
	printf("\n***oi_host***\n");

	// Device buffers (allocated once, reused across all time steps).
	Matrix newoi = AllocateDeviceMatrix(oi_host);
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);  // NOTE(review): stop is never recorded; wall-clock timing below is used instead
	Matrix oi = AllocateDeviceMatrix(oi_host);
	Matrix tempRecordOi2 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0));
	Matrix tempRecordOi4 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0));
	Matrix tempRecordOi6 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0));
	Matrix tempRecordOi32 = AllocateDeviceMatrix(AllocateMatrix(numX + 2, numY + 2, 0.0));

	for (int i = 0; i < numT; i++) {
		printf("round: %d\n", i);

		// Refresh the periodic halo on the host copy: wrap rows then columns.
		for (int j = 0; j < numX + 2; j++) {
			oi_host.elements[0 * (numX + 2) + j] = oi_host.elements[numY*(numX + 2) + j];
		}
		for (int j = 0; j < numX + 2; j++) {
			oi_host.elements[(numY + 1)*(numX + 2) + j] = oi_host.elements[1 * (numX + 2) + j];
		}
		for (int j = 0; j < numY + 2; j++) {
			oi_host.elements[j*(numX + 2) + 0] = oi_host.elements[j*(numX + 2) + numX];
		}
		for (int j = 0; j < numY + 2; j++) {
			oi_host.elements[j*(numX + 2) + numX + 1] = oi_host.elements[j*(numX + 2) + 1];
		}

		// Grid spacing grows with time (applied strain); dy shrinks so that
		// dx*dy stays constant.
		float tempx[1] = { deltaX0 + i*0.00005 };
		float tempy[1] = { deltaY0*(deltaX0 / tempx[0]) };
		cudaMemcpyToSymbol(deltaX, &tempx, sizeof(float));
		cudaMemcpyToSymbol(deltaY, &tempy, sizeof(float));

		CopyToDeviceMatrix(oi, oi_host);

		// Launch geometry kept for experimentation; the actual launch below
		// uses one 64-thread block, matching the 8x8 (with halo) test grid.
		dim3 block_size(WIDTH, WIDTH);
		int grid_rows = oi.height / WIDTH + (oi.height % WIDTH ? 1 : 0);
		int grid_cols = oi.width / WIDTH + (oi.width % WIDTH ? 1 : 0);
		dim3 grid_size(grid_cols, grid_rows);

		allCal<<< 1, 64 >>>(newoi, oi, tempRecordOi2, tempRecordOi4, tempRecordOi6, tempRecordOi32);
		cudaDeviceSynchronize();
		Check_CUDA_Error("Kernel Execution Failed!");

		CopyFromDeviceMatrix(oi_host, newoi);
	}

	GetLocalTime(&sysstop);

	// Final field dump.
	for (int i = 0; i < numY + 2; i++) {
		for (int j = 0; j < numX + 2; j++) {
			printf("%f\t", oi_host.elements[i*(numX + 2) + j]);
		}
		printf("\n");
	}
	printf("Processing Time: %d ms \n", (sysstop.wMilliseconds + sysstop.wSecond * 1000 + sysstop.wMinute * 60000) - (sysstart.wMilliseconds + sysstart.wSecond * 1000 + sysstart.wMinute * 60000));

	// Release device and host buffers (previously leaked).
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	FreeDeviceMatrix(&tempRecordOi2);
	FreeDeviceMatrix(&tempRecordOi4);
	FreeDeviceMatrix(&tempRecordOi6);
	FreeDeviceMatrix(&tempRecordOi32);
	FreeDeviceMatrix(&oi);
	FreeDeviceMatrix(&newoi);
	FreeMatrix(&axisX);
	FreeMatrix(&axisY);
	FreeMatrix(&temp_matrix);
	FreeMatrix(&chooseX);
	FreeMatrix(&nucleusOi);
	return 0;
}

// Swaps the two outermost rows/columns with their periodic images.
// NOTE(review): unused by main; corner elements are touched by both the row
// and the column swap of different threads, so a race at the corners is
// possible -- confirm before reuse.
__global__ void periodicalize(Matrix in)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	float frontRow1 = in.elements[0 + i];
	float frontRow2 = in.elements[in.width + i];
	float backRow1 = in.elements[in.width * (in.height - 2) + i];
	float backRow2 = in.elements[in.width * (in.height - 1) + i];
	float leftCol1 = in.elements[in.width * i + 0];
	float leftCol2 = in.elements[in.width * i + 1];
	float rightCol1 = in.elements[in.width * i + in.width - 2];
	float rightCol2 = in.elements[in.width * i + in.width - 1];
	in.elements[0 + i] = backRow1;
	in.elements[in.width + i] = backRow2;
	in.elements[in.width * (in.height - 2) + i] = frontRow1;
	in.elements[in.width * (in.height - 1) + i] = frontRow2;
	in.elements[in.width * i + 0] = rightCol1;
	in.elements[in.width * i + 1] = rightCol2;
	in.elements[in.width * i + in.width - 2] = leftCol1;
	in.elements[in.width * i + in.width - 1] = leftCol2;
}

// Combines pre-computed x- and y-direction stencil sums into a Laplacian.
// Fixed: previously divided by a hard-coded 0.785 instead of the supplied
// spacings; all existing call sites pass 0.785, so results are unchanged.
__device__ float laplaceCal(float front, float back, float deltaX, float deltaY, float num)
{
	float res = (front / powf(deltaX, num)) + (back / powf(deltaY, num));
	return res;
}

// 9-point Laplacian computed directly from a 3x3 neighbourhood laid out as
// in[row*3 + col] (legacy helper, kept for the original interface).
__device__ float laplaceCal_r(float *in, float deltaX, float deltaY, float num)
{
	float res = ((0.125 * (in[2 * 3 + 2] + in[0 * 3 + 2] + in[2 * 3 + 0] + in[0 * 3 + 0]) + 0.75 * (in[2 * 3 + 1] + in[0 * 3 + 1]) - 0.25 * (in[2 * 3 + 1] + in[0 * 3 + 1]) - 1.5 * in[1 * 3 + 1])) / powf(deltaX, num)
		+ (0.125*(in[2 * 3 + 2] + in[0 * 3 + 2] + in[2 * 3 + 0] + in[0 * 3 + 0]) + 0.75*(in[1 * 3 + 2] + in[1 * 3 + 0]) - 0.25*(in[1 * 3 + 2] + in[1 * 3 + 0]) - 1.5*in[1 * 3 + 1]) / powf(deltaY, num);
	return res;
}

// x-direction stencil sum of a 3x3 neighbourhood.
// Fixed: the function was declared float but had no return statement (UB).
__device__ float frontCal(float *in)
{
	float res = 0.125 * (in[2 * 3 + 2] + in[0 * 3 + 2] + in[2 * 3 + 0] + in[0 * 3 + 0]) + 0.75 * (in[2 * 3 + 1] + in[0 * 3 + 1]) - 0.25 * (in[2 * 3 + 1] + in[0 * 3 + 1]) - 1.5 * in[1 * 3 + 1];
	return res;
}

// y-direction stencil sum of a 3x3 neighbourhood.
// Fixed: the function was declared float but had no return statement (UB).
__device__ float backCal(float *in)
{
	float res = 0.125 * (in[2 * 3 + 2] + in[0 * 3 + 2] + in[2 * 3 + 0] + in[0 * 3 + 0]) + 0.75 * (in[1 * 3 + 2] + in[1 * 3 + 0]) - 0.25 * (in[1 * 3 + 2] + in[1 * 3 + 0]) - 1.5 * in[1 * 3 + 1];
	return res;
}

// WARNING: returns the address of a function-local array (undefined
// behaviour once the function returns).  Kept only for interface
// compatibility; use loadFOI instead.
__device__ float* getFOI(Matrix in, int i)
{
	float foi[9];
	foi[0 + 0] = in.elements[i - in.width - 1];
	foi[0 + 1] = in.elements[i - in.width];
	foi[0 + 2] = in.elements[i - in.width + 1];
	foi[1 * 3 + 0] = in.elements[i - 1];
	foi[1 * 3 + 1] = in.elements[i];
	foi[1 * 3 + 2] = in.elements[i + 1];
	foi[2 * 3 + 0] = in.elements[i + in.width - 1];
	foi[2 * 3 + 1] = in.elements[i + in.width];
	foi[2 * 3 + 2] = in.elements[i + in.width + 1];
	return foi;
}

// WARNING: returns the address of a function-local array (undefined
// behaviour).  Kept only for interface compatibility; use loadFOI instead.
__device__ float* getFOIf(float *in, int i, int width)
{
	float foi[9];
	foi[0 + 0] = in[i - width - 1];
	foi[0 + 1] = in[i - width];
	foi[0 + 2] = in[i - width + 1];
	foi[1 * 3 + 0] = in[i - 1];
	foi[1 * 3 + 1] = in[i];
	foi[1 * 3 + 2] = in[i + 1];
	foi[2 * 3 + 0] = in[i + width - 1];
	foi[2 * 3 + 1] = in[i + width];
	foi[2 * 3 + 2] = in[i + width + 1];
	return foi;
}

// WARNING: returns the address of a function-local array (undefined
// behaviour).  Kept only for interface compatibility.
__device__ float* foiPowOf3(float *foi)
{
	float threefoi[9];
	for (int k = 0; k < 9; k++) {
		threefoi[k] = foi[k] * foi[k] * foi[k];
	}
	return threefoi;
}

// Copies the 3x3 neighbourhood of flat index i (rows of length `width`)
// into foi, row-major: foi[0..2] row above, foi[3..5] centre, foi[6..8] below.
// Caller must guarantee i is an interior index (see isInterior).
__device__ void loadFOI(const float *in, int i, int width, float foi[9])
{
	foi[0] = in[i - width - 1]; foi[1] = in[i - width]; foi[2] = in[i - width + 1];
	foi[3] = in[i - 1];         foi[4] = in[i];         foi[5] = in[i + 1];
	foi[6] = in[i + width - 1]; foi[7] = in[i + width]; foi[8] = in[i + width + 1];
}

// True when flat index i is neither in the halo (first/last row or column)
// nor beyond the end of the width x height matrix.
__device__ bool isInterior(int i, int width, int height)
{
	return !(i % width == 0 || i % width == (width - 1) ||
		(i >= 0 && i <= (width - 1)) ||
		(i <= (height*width - 1) && i >= (height - 1)*width) ||
		i >= height*width);
}

// 9-point discrete Laplacian (d2/dx2 + d2/dy2) at the centre of a 3x3
// neighbourhood foi, for grid spacings x and y.
__device__ float laplace9(const float foi[9], float x, float y)
{
	float corners = foi[8] + foi[2] + foi[6] + foi[0];
	float dxx = 0.125f*corners + 0.75f*(foi[7] + foi[1]) - 0.25f*(foi[7] + foi[1]) - 1.5f*foi[4];
	float dyy = 0.125f*corners + 0.75f*(foi[5] + foi[3]) - 0.25f*(foi[5] + foi[3]) - 1.5f*foi[4];
	return dxx / (x*x) + dyy / (y*y);
}

// 9-point Laplacian of the element-wise cube of the neighbourhood (the
// Lap(oi^3) term of the update).
__device__ float laplace9Cubed(const float foi[9], float x, float y)
{
	float c[9];
#pragma unroll
	for (int k = 0; k < 9; k++) {
		c[k] = foi[k] * foi[k] * foi[k];
	}
	return laplace9(c, x, y);
}

// Explicit Euler combination of the four pre-computed terms for point i
// (device-side helper kept for the original interface; unused by main).
__device__ void getNowOi(Matrix out, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32, int i)
{
	out.elements[i] = oi.elements[i] + deltaT * ((1 - paraA) * tempRecordOi2.elements[i] + 2 * tempRecordOi4.elements[i] + tempRecordOi6.elements[i] + tempRecordOi32.elements[i]);
}

// One full explicit Euler step in a single kernel (global-memory passes).
// Requires the launch to cover all width*height elements within ONE block
// (host currently launches <<<1, 64>>> for the 8x8-with-halo grid):
// __syncthreads() only orders threads within a block, so a multi-block
// launch would need a grid-wide sync between passes.
// Barriers are executed by ALL threads (previously they sat inside the
// divergent interior-only branch, which is undefined behaviour).
__global__ void allCal(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32)
{
	int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
	float x = deltaX[0];
	float y = deltaY[0];
	int total = oi.width * oi.height;
	bool interior = isInterior(i, oi.width, oi.height);
	float foi[9];
	float tempOi2 = 0.0f, tempOi32 = 0.0f, tempOi4 = 0.0f;

	// Halo cells (and out-of-range threads) just copy oi through.
	if (i < total) {
		newOi.elements[i] = oi.elements[i];
	}

	// Pass 1: Laplacian of oi and of oi^3.
	if (interior) {
		loadFOI(oi.elements, i, oi.width, foi);
		tempOi2 = laplace9(foi, x, y);
		tempRecordOi2.elements[i] = tempOi2;
		tempOi32 = laplace9Cubed(foi, x, y);
		tempRecordOi32.elements[i] = tempOi32;
	}
	__syncthreads();

	// Pass 2: Laplacian of pass 1 (biharmonic term).
	if (interior) {
		loadFOI(tempRecordOi2.elements, i, oi.width, foi);
		tempOi4 = laplace9(foi, x, y);
		tempRecordOi4.elements[i] = tempOi4;
	}
	__syncthreads();

	// Pass 3: Laplacian of pass 2, then the Euler update.
	if (interior) {
		loadFOI(tempRecordOi4.elements, i, oi.width, foi);
		float tempOi6 = laplace9(foi, x, y);
		tempRecordOi6.elements[i] = tempOi6;
		newOi.elements[i] = oi.elements[i] + deltaT * ((1 - paraA) * tempOi2 + 2 * tempOi4 + tempOi6 + tempOi32);
	}
}

// Same step as allCal but staging each field in shared memory.
// NOTE(review): the shared buffers are indexed with the GLOBAL id i, so this
// kernel is only valid for a single-block launch with blockDim.x <= SIZE.
__global__ void allCal_new(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32)
{
	int i = blockIdx.x *blockDim.x + threadIdx.x;
	float x = deltaX[0];
	float y = deltaY[0];
	int total = oi.width * oi.height;
	bool interior = isInterior(i, oi.width, oi.height);
	float foi[9];

	// Pass 1: stage oi, then Laplacian of oi and of oi^3.
	__shared__ float temp2[SIZE];
	if (i < SIZE && i < total) temp2[i] = oi.elements[i];
	__syncthreads();
	if (interior) {
		loadFOI(temp2, i, oi.width, foi);
		tempRecordOi2.elements[i] = laplace9(foi, x, y);
		tempRecordOi32.elements[i] = laplace9Cubed(foi, x, y);
	}
	__syncthreads();

	// Pass 2: stage pass-1 result, Laplacian (biharmonic term).
	__shared__ float temp4[SIZE];
	if (i < SIZE && i < total) temp4[i] = tempRecordOi2.elements[i];
	__syncthreads();
	if (interior) {
		loadFOI(temp4, i, oi.width, foi);
		tempRecordOi4.elements[i] = laplace9(foi, x, y);
	}
	__syncthreads();

	// Pass 3: stage pass-2 result, Laplacian, then Euler update.
	if (i < total) newOi.elements[i] = oi.elements[i];
	__shared__ float temp6[SIZE];
	if (i < SIZE && i < total) temp6[i] = tempRecordOi4.elements[i];
	__syncthreads();
	if (interior) {
		loadFOI(temp6, i, oi.width, foi);
		float tempOi6 = laplace9(foi, x, y);
		tempRecordOi6.elements[i] = tempOi6;
		newOi.elements[i] = oi.elements[i] + deltaT * ((1 - paraA) * tempRecordOi2.elements[i] + 2 * tempRecordOi4.elements[i] + tempOi6 + tempRecordOi32.elements[i]);
	}
}

// Legacy first stage (fixed-0.785 spacing): Laplacian of oi and oi^3 into
// the two record matrices, interior points only.
__global__ void firstCal(Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi32)
{
	int i = blockIdx.x *blockDim.x + threadIdx.x;
	if (isInterior(i, oi.width, oi.height)) {
		float foi[9];
		loadFOI(oi.elements, i, oi.width, foi);
		tempRecordOi2.elements[i] = laplaceCal(frontCal(foi), backCal(foi), 0.785, 0.785, 2.0);
		float foi3[9];
		for (int k = 0; k < 9; k++) foi3[k] = foi[k] * foi[k] * foi[k];
		tempRecordOi32.elements[i] = laplaceCal(frontCal(foi3), backCal(foi3), 0.785, 0.785, 2.0);
	}
}

// Stage 1 of the three-kernel pipeline: Laplacian of oi and of oi^3.
// NOTE(review): shared buffer indexed by global id -- single-block launch only.
__global__ void calOne(Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi32)
{
	int i = blockIdx.x *blockDim.x + threadIdx.x;
	__shared__ float temp2[SIZE];
	if (i < SIZE && i < oi.width * oi.height) temp2[i] = oi.elements[i];
	__syncthreads();
	if (isInterior(i, oi.width, oi.height)) {
		float foi[9];
		loadFOI(temp2, i, oi.width, foi);
		float x = deltaX[0];
		float y = deltaY[0];
		tempRecordOi2.elements[i] = laplace9(foi, x, y);
		tempRecordOi32.elements[i] = laplace9Cubed(foi, x, y);
	}
}

// Stage 2 of the pipeline: biharmonic term Lap(Lap(oi)).
// NOTE(review): shared buffer indexed by global id -- single-block launch only.
__global__ void calTwo(Matrix tempRecordOi2, Matrix tempRecordOi4)
{
	int i = blockIdx.x *blockDim.x + threadIdx.x;
	__shared__ float temp4[SIZE];
	if (i < SIZE && i < tempRecordOi2.width * tempRecordOi2.height) temp4[i] = tempRecordOi2.elements[i];
	__syncthreads();
	if (isInterior(i, tempRecordOi2.width, tempRecordOi2.height)) {
		float foi[9];
		loadFOI(temp4, i, tempRecordOi2.width, foi);
		tempRecordOi4.elements[i] = laplace9(foi, deltaX[0], deltaY[0]);
	}
}

// Stage 3 of the pipeline: tri-harmonic term plus the Euler update.
// Halo points copy oi through unchanged.
// NOTE(review): shared buffer indexed by global id -- single-block launch only.
__global__ void callThree(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32)
{
	int i = blockIdx.x *blockDim.x + threadIdx.x;
	if (i < oi.width * oi.height) newOi.elements[i] = oi.elements[i];
	__shared__ float temp6[SIZE];
	if (i < SIZE && i < oi.width * oi.height) temp6[i] = tempRecordOi4.elements[i];
	__syncthreads();
	if (isInterior(i, oi.width, oi.height)) {
		float foi[9];
		loadFOI(temp6, i, oi.width, foi);
		float tempOi6 = laplace9(foi, deltaX[0], deltaY[0]);
		tempRecordOi6.elements[i] = tempOi6;
		newOi.elements[i] = oi.elements[i] + deltaT * ((1 - paraA) * tempRecordOi2.elements[i] + 2 * tempRecordOi4.elements[i] + tempOi6 + tempRecordOi32.elements[i]);
	}
}
22cae2fcea141a830d5aea834f1d0c5bc80ad942.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "init.hip"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// Launch-configuration and matrix-size sweep for benchmarking the init kernel.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Sweeps matrix sizes (argv[1] = how many of matrices_ to use, max 7) against
// all 20 block shapes; prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per combo.
int main(int argc, char **argv) {
	hipSetDevice(0);
	if (argc < 2) { fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]); return 1; }
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			uint32_t val = 1;
			uint32_t n = XSIZE*YSIZE;   // element count passed to the kernel
			uint32_t *v = NULL;
			// BUG FIX: n uint32_t elements need n*sizeof(uint32_t) bytes;
			// the original allocated only n bytes (4x too small).
			hipMalloc(&v, (size_t)n * sizeof(uint32_t));
			// Round the grid up so it covers the whole matrix.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
			while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
			dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			hipFree(0);  // forces context creation before any timing
			hipLaunchKernelGGL(( init), dim3(gridBlock),dim3(threadBlock), 0, 0, v,val,n);
			hipDeviceSynchronize();
			// Warm-up launches.
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				hipLaunchKernelGGL(( init), dim3(gridBlock),dim3(threadBlock), 0, 0, v,val,n);
			}
			hipDeviceSynchronize();  // drain warm-up work before starting the clock
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				hipLaunchKernelGGL(( init), dim3(gridBlock),dim3(threadBlock), 0, 0, v,val,n);
			}
			// BUG FIX: kernel launches are asynchronous; without this sync the
			// loop only measured enqueue time, not execution time.
			hipDeviceSynchronize();
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			hipFree(v);  // BUG FIX: the original leaked v on every iteration
		}
	}
}
22cae2fcea141a830d5aea834f1d0c5bc80ad942.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "init.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// Launch-configuration and matrix-size sweep for benchmarking the init kernel.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Sweeps matrix sizes (argv[1] = how many of matrices_ to use, max 7) against
// all 20 block shapes; prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per combo.
int main(int argc, char **argv) {
	cudaSetDevice(0);
	if (argc < 2) { fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]); return 1; }
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			uint32_t val = 1;
			uint32_t n = XSIZE*YSIZE;   // element count passed to the kernel
			uint32_t *v = NULL;
			// BUG FIX: n uint32_t elements need n*sizeof(uint32_t) bytes;
			// the original allocated only n bytes (4x too small).
			cudaMalloc(&v, (size_t)n * sizeof(uint32_t));
			// Round the grid up so it covers the whole matrix.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
			while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
			dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0);  // forces context creation before any timing
			init<<<gridBlock,threadBlock>>>(v,val,n);
			cudaDeviceSynchronize();
			// Warm-up launches.
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				init<<<gridBlock,threadBlock>>>(v,val,n);
			}
			cudaDeviceSynchronize();  // drain warm-up work before starting the clock
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				init<<<gridBlock,threadBlock>>>(v,val,n);
			}
			// BUG FIX: kernel launches are asynchronous; without this sync the
			// loop only measured enqueue time, not execution time.
			cudaDeviceSynchronize();
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			cudaFree(v);  // BUG FIX: the original leaked v on every iteration
		}
	}
}
2bfa4784b27bb236f0f0ca27ba323bf3e5b1413c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_update_halo_kernel4_plus_2_a [3][2]; static int dims_update_halo_kernel4_plus_2_a_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel4_plus_2_a_gpu(ACC<double> &vol_flux_y, ACC<double> &mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y(0,0,0) = vol_flux_y(2,0,0); if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y(0,0,0) = mass_flux_y(2,0,0); } __global__ void ops_update_halo_kernel4_plus_2_a( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_a[0][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_a[0][0] * dims_update_halo_kernel4_plus_2_a[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_a[1][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_a[1][0] * dims_update_halo_kernel4_plus_2_a[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel4_plus_2_a[0][0], dims_update_halo_kernel4_plus_2_a[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel4_plus_2_a[1][0], dims_update_halo_kernel4_plus_2_a[1][1], arg1); update_halo_kernel4_plus_2_a_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_a_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; 
#endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,76)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(76,"update_halo_kernel4_plus_2_a"); OPS_kernels[76].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel4_plus_2_a_h[0][0] || ydim0 != dims_update_halo_kernel4_plus_2_a_h[0][1] || xdim1 != dims_update_halo_kernel4_plus_2_a_h[1][0] || ydim1 != dims_update_halo_kernel4_plus_2_a_h[1][1]) { dims_update_halo_kernel4_plus_2_a_h[0][0] = xdim0; dims_update_halo_kernel4_plus_2_a_h[0][1] = ydim0; dims_update_halo_kernel4_plus_2_a_h[1][0] = xdim1; dims_update_halo_kernel4_plus_2_a_h[1][1] = ydim1; cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel4_plus_2_a, dims_update_halo_kernel4_plus_2_a_h, sizeof(dims_update_halo_kernel4_plus_2_a))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; 
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[76].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_a), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[76].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[76].mpi_time += t2-t1; OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_a(char const 
*name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 76; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 76; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_a_execute; if (OPS_diags > 1) { ops_timing_realloc(76,"update_halo_kernel4_plus_2_a"); } ops_enqueue_kernel(desc); } #endif
2bfa4784b27bb236f0f0ca27ba323bf3e5b1413c.cu
// // auto-generated by ops.py // __constant__ int dims_update_halo_kernel4_plus_2_a [3][2]; static int dims_update_halo_kernel4_plus_2_a_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel4_plus_2_a_gpu(ACC<double> &vol_flux_y, ACC<double> &mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y(0,0,0) = vol_flux_y(2,0,0); if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y(0,0,0) = mass_flux_y(2,0,0); } __global__ void ops_update_halo_kernel4_plus_2_a( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_a[0][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_a[0][0] * dims_update_halo_kernel4_plus_2_a[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_a[1][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_a[1][0] * dims_update_halo_kernel4_plus_2_a[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel4_plus_2_a[0][0], dims_update_halo_kernel4_plus_2_a[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel4_plus_2_a[1][0], dims_update_halo_kernel4_plus_2_a[1][1], arg1); update_halo_kernel4_plus_2_a_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_a_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING 
&& !OPS_LAZY if (!ops_checkpointing_before(args,3,range,76)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(76,"update_halo_kernel4_plus_2_a"); OPS_kernels[76].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel4_plus_2_a_h[0][0] || ydim0 != dims_update_halo_kernel4_plus_2_a_h[0][1] || xdim1 != dims_update_halo_kernel4_plus_2_a_h[1][0] || ydim1 != dims_update_halo_kernel4_plus_2_a_h[1][1]) { dims_update_halo_kernel4_plus_2_a_h[0][0] = xdim0; dims_update_halo_kernel4_plus_2_a_h[0][1] = ydim0; dims_update_halo_kernel4_plus_2_a_h[1][0] = xdim1; dims_update_halo_kernel4_plus_2_a_h[1][1] = ydim1; cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel4_plus_2_a, dims_update_halo_kernel4_plus_2_a_h, sizeof(dims_update_halo_kernel4_plus_2_a))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int 
dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[76].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel4_plus_2_a<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[76].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[76].mpi_time += t2-t1; OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = 
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 76; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 76; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_a_execute; if (OPS_diags > 1) { ops_timing_realloc(76,"update_halo_kernel4_plus_2_a"); } ops_enqueue_kernel(desc); } #endif
d0e58c443746a5af47400d45113bc65c8b79315e.hip
// !!! This is a file automatically generated by hipify!!! //pass //--blockDim=64 --gridDim=64 --no-inline #include "hip/hip_runtime.h" __global__ void foo() { { int x = 4; } { int x = 2; } }
d0e58c443746a5af47400d45113bc65c8b79315e.cu
//pass //--blockDim=64 --gridDim=64 --no-inline #include "cuda.h" __global__ void foo() { { int x = 4; } { int x = 2; } }
e29dcaf81c1e0cec1a0cd42c91c07d4d6f99dd6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Samuel Murtaugh and Jeremy Stacy //6780 Final Project #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <time.h> #include <GL/gl.h> #include <GL/glu.h> #include <GL/glut.h> #define BOARD_DIM 1000 #define NUM_BLOCK 5 #define NUM_THREAD 256 GLint window_w = 1000; GLint window_h = 1000; GLfloat left = 0.0; GLfloat right = 1.0; GLfloat bottom = 0.0; GLfloat top = 1.0; GLint game_w = BOARD_DIM; GLint game_h = BOARD_DIM; int board_dim1, board_dim2; int *old_board, *new_board; int *d_old_board, *d_new_board; int N; __device__ int check_up_left(int *board, int width, int position) { if (board[position - width - 1] == 1) return 1; else return 0; } __device__ int check_up(int *board, int width, int position) { if (board[position - width] == 1) return 1; else return 0; } __device__ int check_up_right(int *board, int width, int position) { if (board[position - width + 1] == 1) return 1; else return 0; } __device__ int check_left(int *board, int width, int position) { if (board[position - 1] == 1) return 1; else return 0; } __device__ int check_right(int *board, int width, int position) { if (board[position + 1] == 1) return 1; else return 0; } __device__ int check_down_left(int *board, int width, int position) { if (board[position + width - 1] == 1) return 1; else return 0; } __device__ int check_down(int *board, int width, int position) { if (board[position + width] == 1) return 1; else return 0; } __device__ int check_down_right(int *board, int width, int position) { if (board[position + width + 1] == 1) return 1; else return 0; } __global__ void update_board(int board_dimh, int board_dimw, int *o_board, int *n_board, int nblock) { int gbl_id = blockIdx.x * blockDim.x + threadIdx.x; int num_squares = board_dimh*board_dimw; int num_alive; int stride = nblock * blockDim.x; for (int i = gbl_id; i < num_squares; i += stride) { num_alive = 0; //look at the 8 neighboring 
squares if (i == 0) // top-left corner { num_alive += check_right(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); num_alive += check_down_right(o_board, board_dimw, i); } else if (i == board_dimw - 1) // top-right corner { num_alive += check_left(o_board, board_dimw, i); num_alive += check_down_left(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); } else if (i == board_dimw * (board_dimh - 1)) // bottom-left corner { num_alive += check_up(o_board, board_dimw, i); num_alive += check_up_right(o_board, board_dimw, i); num_alive += check_right(o_board, board_dimw, i); } else if (i == board_dimw * board_dimh - 1) // bottom-right corner { num_alive += check_up_left(o_board, board_dimw, i); num_alive += check_up(o_board, board_dimw, i); num_alive += check_left(o_board, board_dimw, i); } else if (i < board_dimw) // top row { num_alive += check_left(o_board, board_dimw, i); num_alive += check_right(o_board, board_dimw, i); num_alive += check_down_left(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); num_alive += check_down_right(o_board, board_dimw, i); } else if (i > board_dimw * board_dimh - board_dimw - 1) // bottom row { num_alive += check_up_left(o_board, board_dimw, i); num_alive += check_up(o_board, board_dimw, i); num_alive += check_up_right(o_board, board_dimw, i); num_alive += check_left(o_board, board_dimw, i); num_alive += check_right(o_board, board_dimw, i); } else if (i % board_dimw == 0) // left column { num_alive += check_up(o_board, board_dimw, i); num_alive += check_up_right(o_board, board_dimw, i); num_alive += check_right(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); num_alive += check_down_right(o_board, board_dimw, i); } else if ((i + 1) % board_dimw == 0) // right column { num_alive += check_up_left(o_board, board_dimw, i); num_alive += check_up(o_board, board_dimw, i); num_alive += check_left(o_board, board_dimw, i); num_alive += 
check_down_left(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); } else { num_alive += check_up_left(o_board, board_dimw, i); num_alive += check_up(o_board, board_dimw, i); num_alive += check_up_right(o_board, board_dimw, i); num_alive += check_left(o_board, board_dimw, i); num_alive += check_right(o_board, board_dimw, i); num_alive += check_down_left(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); num_alive += check_down_right(o_board, board_dimw, i); } //check what happens to this square if (o_board[i] == 1) // cell alive { if (num_alive == 2 || num_alive == 3) // cell stays alive n_board[i] = 1; else // cell dies n_board[i] = 0; } else // cell dead { if (num_alive == 3) // cell becomes alive n_board[i] = 1; else // cell stays dead n_board[i] = 0; } } } void display(){ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); GLfloat xsize = (right - left) / game_w; GLfloat ysize = (top - bottom) / game_h; glBegin(GL_QUADS); for (GLint x = 0; x < game_w; x++){ for (GLint y = 0; y < game_h; y++){ if (old_board[y*game_w + x] == 1) glColor3f(0.0,0.0,0.0); else glColor3f(1.0,1.0,1.0); glVertex2f(x*xsize+left, y*ysize+bottom); glVertex2f((x+1)*xsize+left, y*ysize+bottom); glVertex2f((x+1)*xsize+left,(y+1)*ysize+bottom); glVertex2f(x*xsize+left,(y+1)*ysize+bottom); } } glEnd(); glFlush(); glutSwapBuffers(); } void update(int value) { hipMemcpy(d_old_board, old_board, board_dim1*board_dim2*sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( update_board), dim3(NUM_BLOCK), dim3(NUM_THREAD), 0, 0, board_dim1, board_dim2, d_old_board, d_new_board, N); hipMemcpy(new_board, d_new_board, board_dim1*board_dim2*sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); memcpy(old_board, new_board, board_dim1*board_dim2*sizeof(int)); glutPostRedisplay(); glutTimerFunc(1000,update,0); } void reshape(int width, int height){ window_w = width; window_h = height; glViewport(0,0,window_w,window_h); 
glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(left, right, bottom, top); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glutPostRedisplay(); } //========================================================== int main(int argc, char **argv) { srand(time(NULL)); glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(window_w,window_h); glutCreateWindow("Life"); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glClearColor(1, 1, 1, 1); // host variables board_dim1 = BOARD_DIM; board_dim2 = board_dim1; old_board = (int *)calloc(board_dim1*board_dim2, sizeof(int)); new_board = (int *)calloc(board_dim1*board_dim2, sizeof(int)); N = NUM_BLOCK; // initialize board for (int i = 0; i < board_dim1*board_dim2; i++) { old_board[i] = rand() % 2; } // setup device memory hipMalloc((void **)&d_old_board, board_dim1*board_dim2*sizeof(int)); hipMalloc((void **)&d_new_board, board_dim1*board_dim2*sizeof(int)); hipSetDeviceFlags(hipDeviceScheduleBlockingSync); glutTimerFunc(1000, update, 0); glutReshapeFunc(reshape); glutDisplayFunc(display); glutMainLoop(); return 0; }
e29dcaf81c1e0cec1a0cd42c91c07d4d6f99dd6e.cu
//Samuel Murtaugh and Jeremy Stacy //6780 Final Project #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <time.h> #include <GL/gl.h> #include <GL/glu.h> #include <GL/glut.h> #define BOARD_DIM 1000 #define NUM_BLOCK 5 #define NUM_THREAD 256 GLint window_w = 1000; GLint window_h = 1000; GLfloat left = 0.0; GLfloat right = 1.0; GLfloat bottom = 0.0; GLfloat top = 1.0; GLint game_w = BOARD_DIM; GLint game_h = BOARD_DIM; int board_dim1, board_dim2; int *old_board, *new_board; int *d_old_board, *d_new_board; int N; __device__ int check_up_left(int *board, int width, int position) { if (board[position - width - 1] == 1) return 1; else return 0; } __device__ int check_up(int *board, int width, int position) { if (board[position - width] == 1) return 1; else return 0; } __device__ int check_up_right(int *board, int width, int position) { if (board[position - width + 1] == 1) return 1; else return 0; } __device__ int check_left(int *board, int width, int position) { if (board[position - 1] == 1) return 1; else return 0; } __device__ int check_right(int *board, int width, int position) { if (board[position + 1] == 1) return 1; else return 0; } __device__ int check_down_left(int *board, int width, int position) { if (board[position + width - 1] == 1) return 1; else return 0; } __device__ int check_down(int *board, int width, int position) { if (board[position + width] == 1) return 1; else return 0; } __device__ int check_down_right(int *board, int width, int position) { if (board[position + width + 1] == 1) return 1; else return 0; } __global__ void update_board(int board_dimh, int board_dimw, int *o_board, int *n_board, int nblock) { int gbl_id = blockIdx.x * blockDim.x + threadIdx.x; int num_squares = board_dimh*board_dimw; int num_alive; int stride = nblock * blockDim.x; for (int i = gbl_id; i < num_squares; i += stride) { num_alive = 0; //look at the 8 neighboring squares if (i == 0) // top-left corner { num_alive += check_right(o_board, board_dimw, i); 
num_alive += check_down(o_board, board_dimw, i); num_alive += check_down_right(o_board, board_dimw, i); } else if (i == board_dimw - 1) // top-right corner { num_alive += check_left(o_board, board_dimw, i); num_alive += check_down_left(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); } else if (i == board_dimw * (board_dimh - 1)) // bottom-left corner { num_alive += check_up(o_board, board_dimw, i); num_alive += check_up_right(o_board, board_dimw, i); num_alive += check_right(o_board, board_dimw, i); } else if (i == board_dimw * board_dimh - 1) // bottom-right corner { num_alive += check_up_left(o_board, board_dimw, i); num_alive += check_up(o_board, board_dimw, i); num_alive += check_left(o_board, board_dimw, i); } else if (i < board_dimw) // top row { num_alive += check_left(o_board, board_dimw, i); num_alive += check_right(o_board, board_dimw, i); num_alive += check_down_left(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); num_alive += check_down_right(o_board, board_dimw, i); } else if (i > board_dimw * board_dimh - board_dimw - 1) // bottom row { num_alive += check_up_left(o_board, board_dimw, i); num_alive += check_up(o_board, board_dimw, i); num_alive += check_up_right(o_board, board_dimw, i); num_alive += check_left(o_board, board_dimw, i); num_alive += check_right(o_board, board_dimw, i); } else if (i % board_dimw == 0) // left column { num_alive += check_up(o_board, board_dimw, i); num_alive += check_up_right(o_board, board_dimw, i); num_alive += check_right(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); num_alive += check_down_right(o_board, board_dimw, i); } else if ((i + 1) % board_dimw == 0) // right column { num_alive += check_up_left(o_board, board_dimw, i); num_alive += check_up(o_board, board_dimw, i); num_alive += check_left(o_board, board_dimw, i); num_alive += check_down_left(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); } else { num_alive 
+= check_up_left(o_board, board_dimw, i); num_alive += check_up(o_board, board_dimw, i); num_alive += check_up_right(o_board, board_dimw, i); num_alive += check_left(o_board, board_dimw, i); num_alive += check_right(o_board, board_dimw, i); num_alive += check_down_left(o_board, board_dimw, i); num_alive += check_down(o_board, board_dimw, i); num_alive += check_down_right(o_board, board_dimw, i); } //check what happens to this square if (o_board[i] == 1) // cell alive { if (num_alive == 2 || num_alive == 3) // cell stays alive n_board[i] = 1; else // cell dies n_board[i] = 0; } else // cell dead { if (num_alive == 3) // cell becomes alive n_board[i] = 1; else // cell stays dead n_board[i] = 0; } } } void display(){ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); GLfloat xsize = (right - left) / game_w; GLfloat ysize = (top - bottom) / game_h; glBegin(GL_QUADS); for (GLint x = 0; x < game_w; x++){ for (GLint y = 0; y < game_h; y++){ if (old_board[y*game_w + x] == 1) glColor3f(0.0,0.0,0.0); else glColor3f(1.0,1.0,1.0); glVertex2f(x*xsize+left, y*ysize+bottom); glVertex2f((x+1)*xsize+left, y*ysize+bottom); glVertex2f((x+1)*xsize+left,(y+1)*ysize+bottom); glVertex2f(x*xsize+left,(y+1)*ysize+bottom); } } glEnd(); glFlush(); glutSwapBuffers(); } void update(int value) { cudaMemcpy(d_old_board, old_board, board_dim1*board_dim2*sizeof(int), cudaMemcpyHostToDevice); update_board<<<NUM_BLOCK, NUM_THREAD>>>(board_dim1, board_dim2, d_old_board, d_new_board, N); cudaMemcpy(new_board, d_new_board, board_dim1*board_dim2*sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); memcpy(old_board, new_board, board_dim1*board_dim2*sizeof(int)); glutPostRedisplay(); glutTimerFunc(1000,update,0); } void reshape(int width, int height){ window_w = width; window_h = height; glViewport(0,0,window_w,window_h); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(left, right, bottom, top); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glutPostRedisplay(); } 
//========================================================== int main(int argc, char **argv) { srand(time(NULL)); glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(window_w,window_h); glutCreateWindow("Life"); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glClearColor(1, 1, 1, 1); // host variables board_dim1 = BOARD_DIM; board_dim2 = board_dim1; old_board = (int *)calloc(board_dim1*board_dim2, sizeof(int)); new_board = (int *)calloc(board_dim1*board_dim2, sizeof(int)); N = NUM_BLOCK; // initialize board for (int i = 0; i < board_dim1*board_dim2; i++) { old_board[i] = rand() % 2; } // setup device memory cudaMalloc((void **)&d_old_board, board_dim1*board_dim2*sizeof(int)); cudaMalloc((void **)&d_new_board, board_dim1*board_dim2*sizeof(int)); cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); glutTimerFunc(1000, update, 0); glutReshapeFunc(reshape); glutDisplayFunc(display); glutMainLoop(); return 0; }
f416884aaf01e9564f5335989f92451aaeffdf2f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void shared1R8C1W8C1G(float *A, float *B, float *C, const int N) { // compilador esperto e aproveita o valor de i, mas faz 1W, 2 R nas outras posies da Shared __shared__ float Smem[512]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { Smem[((threadIdx.x+1)*8)%512] = i; C[i] = Smem[(threadIdx.x*8)%512]; } }
f416884aaf01e9564f5335989f92451aaeffdf2f.cu
#include "includes.h" __global__ void shared1R8C1W8C1G(float *A, float *B, float *C, const int N) { // compilador é esperto e aproveita o valor de i, mas faz 1W, 2 R nas outras posições da Shared __shared__ float Smem[512]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { Smem[((threadIdx.x+1)*8)%512] = i; C[i] = Smem[(threadIdx.x*8)%512]; } }
74eb2d340d4e4495de9f9ffbd18e2f6c1fee600c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "atomic_cuda.cuh" #include "atomic_ispc.h" #include <iostream> int log2(int i) { int r = 0; while (i >>= 1) r++; return r; } int bit_reverse(int w, int bits) { int r = 0; for (int i = 0; i < bits; i++) { int bit = (w & (1 << i)) >> i; r |= bit << (bits - i - 1); } return r; } template <typename T> void executeCUDA(size_t ARRAY_SIZE, size_t BIN_COUNT, const T *h_in, const T *h_bins, T *cuda_bins) { T *d_in; T *d_bins; size_t BIN_BYTES = BIN_COUNT * sizeof(T); size_t ARRAY_BYTES = ARRAY_SIZE * sizeof(T); hipMalloc((void **)&d_in, ARRAY_BYTES); hipMalloc((void **)&d_bins, BIN_BYTES); hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); hipMemcpy(d_bins, h_bins, BIN_BYTES, hipMemcpyHostToDevice); hipLaunchKernelGGL(( atomic_cuda), dim3(ARRAY_SIZE / 64), dim3(64), 0, 0, d_bins, d_in, BIN_COUNT); hipMemcpy(cuda_bins, d_bins, BIN_BYTES, hipMemcpyDeviceToHost); hipFree(d_in); hipFree(d_bins); } template <typename T> void executeISPC(size_t ARRAY_SIZE, size_t BIN_COUNT, const T *h_in, const int *h_bins, int *ispc_bins) { ispc::Dim3 grid_dim{static_cast<uint32_t>(ARRAY_SIZE / 64), 1, 1}; ispc::Dim3 block_dim{64, 1, 1}; ispc::atomic_ispc(grid_dim, block_dim, ispc_bins, h_in, BIN_COUNT); } void checkResults(int *cuda, int *ispc, size_t N) { for(size_t i = 0; i < N; i++){ if(cuda[i] != ispc[i]){ std::cerr << "Mismatch at index : " << i << " " << cuda[i] << ", " << ispc[i] << "\n"; } } } int main(int argc, char **argv) { const int ARRAY_SIZE = 65536; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int); const int BIN_COUNT = 16; const int BIN_BYTES = BIN_COUNT * sizeof(int); // generate the input array on the host int h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = bit_reverse(i, log2(ARRAY_SIZE)); } int h_bins[BIN_COUNT], cuda_bins[BIN_COUNT], ispc_bins[BIN_COUNT]; for (int i = 0; i < BIN_COUNT; i++) { h_bins[i] = 0; ispc_bins[i] = 0; } executeCUDA(ARRAY_SIZE, 
BIN_COUNT, h_in, h_bins, cuda_bins); executeISPC(ARRAY_SIZE, BIN_COUNT, h_in, h_bins, ispc_bins); checkResults(cuda_bins, ispc_bins, BIN_COUNT); return 0; }
74eb2d340d4e4495de9f9ffbd18e2f6c1fee600c.cu
#include "atomic_cuda.cuh" #include "atomic_ispc.h" #include <iostream> int log2(int i) { int r = 0; while (i >>= 1) r++; return r; } int bit_reverse(int w, int bits) { int r = 0; for (int i = 0; i < bits; i++) { int bit = (w & (1 << i)) >> i; r |= bit << (bits - i - 1); } return r; } template <typename T> void executeCUDA(size_t ARRAY_SIZE, size_t BIN_COUNT, const T *h_in, const T *h_bins, T *cuda_bins) { T *d_in; T *d_bins; size_t BIN_BYTES = BIN_COUNT * sizeof(T); size_t ARRAY_BYTES = ARRAY_SIZE * sizeof(T); cudaMalloc((void **)&d_in, ARRAY_BYTES); cudaMalloc((void **)&d_bins, BIN_BYTES); cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); cudaMemcpy(d_bins, h_bins, BIN_BYTES, cudaMemcpyHostToDevice); atomic_cuda<<<ARRAY_SIZE / 64, 64>>>(d_bins, d_in, BIN_COUNT); cudaMemcpy(cuda_bins, d_bins, BIN_BYTES, cudaMemcpyDeviceToHost); cudaFree(d_in); cudaFree(d_bins); } template <typename T> void executeISPC(size_t ARRAY_SIZE, size_t BIN_COUNT, const T *h_in, const int *h_bins, int *ispc_bins) { ispc::Dim3 grid_dim{static_cast<uint32_t>(ARRAY_SIZE / 64), 1, 1}; ispc::Dim3 block_dim{64, 1, 1}; ispc::atomic_ispc(grid_dim, block_dim, ispc_bins, h_in, BIN_COUNT); } void checkResults(int *cuda, int *ispc, size_t N) { for(size_t i = 0; i < N; i++){ if(cuda[i] != ispc[i]){ std::cerr << "Mismatch at index : " << i << " " << cuda[i] << ", " << ispc[i] << "\n"; } } } int main(int argc, char **argv) { const int ARRAY_SIZE = 65536; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int); const int BIN_COUNT = 16; const int BIN_BYTES = BIN_COUNT * sizeof(int); // generate the input array on the host int h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = bit_reverse(i, log2(ARRAY_SIZE)); } int h_bins[BIN_COUNT], cuda_bins[BIN_COUNT], ispc_bins[BIN_COUNT]; for (int i = 0; i < BIN_COUNT; i++) { h_bins[i] = 0; ispc_bins[i] = 0; } executeCUDA(ARRAY_SIZE, BIN_COUNT, h_in, h_bins, cuda_bins); executeISPC(ARRAY_SIZE, BIN_COUNT, h_in, h_bins, ispc_bins); 
checkResults(cuda_bins, ispc_bins, BIN_COUNT); return 0; }
14a402b021979bfa5f93ee019d74f7bfe4392771.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void SoftmaxLossBackprop(const float *label, int num_labels, int batch_size, float *diff) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size) return; const int label_value = static_cast<int>(label[idx]); // For each item in the batch, decrease the result of the label's value by 1 diff[idx * num_labels + label_value] -= 1.0f; }
14a402b021979bfa5f93ee019d74f7bfe4392771.cu
#include "includes.h" __global__ void SoftmaxLossBackprop(const float *label, int num_labels, int batch_size, float *diff) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size) return; const int label_value = static_cast<int>(label[idx]); // For each item in the batch, decrease the result of the label's value by 1 diff[idx * num_labels + label_value] -= 1.0f; }
1c871b5e4ff33019852e8ee71293857edace1524.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<sys/time.h> #define BLOCK_SIZE 16 #define GRID_SIZE 160 #define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE void checkresult(float *ref, float *in, float *out, float *mul, int width){ for(int i = 0 ; i < GRID_SIZE; i++){ for(int j = 0; j < GRID_SIZE; j++){ float sum = 0.0f; int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE; for(int ii = 0; ii < BLOCK_SIZE; ii++){ for(int jj = 0; jj < BLOCK_SIZE; jj++){ sum += in[start + ii * width + jj] * mul[jj]; } } for(int ii = 0; ii < BLOCK_SIZE; ii++){ for(int jj = 0; jj < BLOCK_SIZE; jj++){ if(jj % 2 == 0 && ii % 2 == 0) ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum; else if(jj % 2 == 1 && ii % 2 == 0) ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum; else if(jj % 2 == 1 && ii % 2 == 1) ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum; else ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f; } } } } for(int i = 0; i < SIZE; i++){ if(abs(ref[i]-out[i]) > 1.e-6){ printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]); return; } } printf("results checking passed!\n"); } __global__ void norm(float *in, float *out, float *mul, int width){ int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if(tx >= width || ty >= SIZE/width) return; int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y; float sum = 0.0f; for(int i = 0; i < BLOCK_SIZE; i++){ for(int j = 0; j < BLOCK_SIZE; j++){ sum += in[i * width + j + start] * mul[j]; } } if(tx % 2 == 0 && ty % 2 == 0) out[tx * width + ty] = 2.0 * in[tx * width + ty]/sum; else if(tx % 2 == 1 && ty % 2 == 0) out[tx * width + ty] = in[tx * width + 
ty]/sum; else if(tx % 2 == 1 && ty % 2 == 1) out[tx * width + ty] = (-1.0) * in[tx * width + ty]/sum; else out[tx * width + ty] = 0.0f; } int main(){ float *hA_in = (float *)malloc(SIZE * sizeof(float)); float *hA_out = (float *)malloc(SIZE * sizeof(float)); float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float)); float *ref = (float *)malloc(SIZE * sizeof(float)); float *dA_in, *dA_out, *dB_in; srand(2016); for(int i = 0; i < SIZE; i++){ hA_in[i] = (float)rand()/(float)RAND_MAX; } for(int i = 0; i < BLOCK_SIZE; i++){ hB_in[i] = (float)rand()/(float)RAND_MAX; } hipMalloc((void **)&dA_in, SIZE * sizeof(float)); hipMalloc((void **)&dA_out, SIZE * sizeof(float)); hipMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float)); hipMemcpy(dA_in, hA_in, SIZE * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice); struct timespec start, end; dim3 grid(GRID_SIZE, GRID_SIZE, 1); dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1); hipDeviceSynchronize(); clock_gettime(CLOCK_REALTIME, &start); hipLaunchKernelGGL(( norm), dim3(grid), dim3(block), 0, 0, dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE); hipDeviceSynchronize(); clock_gettime(CLOCK_REALTIME, &end); printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9); hipMemcpy(hA_out, dA_out, SIZE * sizeof(float), hipMemcpyDeviceToHost); checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE); }
1c871b5e4ff33019852e8ee71293857edace1524.cu
#include<stdio.h> #include<stdlib.h> #include<sys/time.h> #define BLOCK_SIZE 16 #define GRID_SIZE 160 #define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE void checkresult(float *ref, float *in, float *out, float *mul, int width){ for(int i = 0 ; i < GRID_SIZE; i++){ for(int j = 0; j < GRID_SIZE; j++){ float sum = 0.0f; int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE; for(int ii = 0; ii < BLOCK_SIZE; ii++){ for(int jj = 0; jj < BLOCK_SIZE; jj++){ sum += in[start + ii * width + jj] * mul[jj]; } } for(int ii = 0; ii < BLOCK_SIZE; ii++){ for(int jj = 0; jj < BLOCK_SIZE; jj++){ if(jj % 2 == 0 && ii % 2 == 0) ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum; else if(jj % 2 == 1 && ii % 2 == 0) ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum; else if(jj % 2 == 1 && ii % 2 == 1) ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum; else ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f; } } } } for(int i = 0; i < SIZE; i++){ if(abs(ref[i]-out[i]) > 1.e-6){ printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]); return; } } printf("results checking passed!\n"); } __global__ void norm(float *in, float *out, float *mul, int width){ int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if(tx >= width || ty >= SIZE/width) return; int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y; float sum = 0.0f; for(int i = 0; i < BLOCK_SIZE; i++){ for(int j = 0; j < BLOCK_SIZE; j++){ sum += in[i * width + j + start] * mul[j]; } } if(tx % 2 == 0 && ty % 2 == 0) out[tx * width + ty] = 2.0 * in[tx * width + ty]/sum; else if(tx % 2 == 1 && ty % 2 == 0) out[tx * width + ty] = in[tx * width + ty]/sum; else if(tx % 2 == 1 && ty % 2 == 1) out[tx * width + ty] = (-1.0) * in[tx * 
width + ty]/sum; else out[tx * width + ty] = 0.0f; } int main(){ float *hA_in = (float *)malloc(SIZE * sizeof(float)); float *hA_out = (float *)malloc(SIZE * sizeof(float)); float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float)); float *ref = (float *)malloc(SIZE * sizeof(float)); float *dA_in, *dA_out, *dB_in; srand(2016); for(int i = 0; i < SIZE; i++){ hA_in[i] = (float)rand()/(float)RAND_MAX; } for(int i = 0; i < BLOCK_SIZE; i++){ hB_in[i] = (float)rand()/(float)RAND_MAX; } cudaMalloc((void **)&dA_in, SIZE * sizeof(float)); cudaMalloc((void **)&dA_out, SIZE * sizeof(float)); cudaMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float)); cudaMemcpy(dA_in, hA_in, SIZE * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice); struct timespec start, end; dim3 grid(GRID_SIZE, GRID_SIZE, 1); dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1); cudaDeviceSynchronize(); clock_gettime(CLOCK_REALTIME, &start); norm<<<grid, block>>>(dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE); cudaDeviceSynchronize(); clock_gettime(CLOCK_REALTIME, &end); printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9); cudaMemcpy(hA_out, dA_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost); checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE); }
2d254ae8ebbda5ac65ffeb80ab4244c079ac4e74.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" ////////////////////////////////////////////////////////////////////////////////////////// __global__ void bestFilter(const double *Params, const bool *iMatch, const int *Wh, const float *cmax, const float *mus, int *id, float *x){ int tid,tind,bid, my_chan, ind, Nspikes, Nfilters, Nthreads, Nchan, Nblocks; float max_running = 0.0f; Nspikes = (int) Params[0]; Nfilters = (int) Params[2]; Nchan = (int) Params[7]; Nthreads = blockDim.x; Nblocks = gridDim.x; tid = threadIdx.x; bid = blockIdx.x; tind = tid + bid * Nthreads; while (tind<Nspikes){ max_running = mus[tind] * mus[tind]; id[tind] = 0; my_chan = Wh[tind]; for(ind=0; ind<Nfilters; ind++) if (iMatch[my_chan + ind * Nchan]) if (cmax[tind + ind*Nspikes] < max_running){ id[tind] = ind; max_running = cmax[tind + ind*Nspikes]; } x[tind] = max_running; tind += Nblocks*Nthreads; } }
2d254ae8ebbda5ac65ffeb80ab4244c079ac4e74.cu
#include "includes.h" ////////////////////////////////////////////////////////////////////////////////////////// __global__ void bestFilter(const double *Params, const bool *iMatch, const int *Wh, const float *cmax, const float *mus, int *id, float *x){ int tid,tind,bid, my_chan, ind, Nspikes, Nfilters, Nthreads, Nchan, Nblocks; float max_running = 0.0f; Nspikes = (int) Params[0]; Nfilters = (int) Params[2]; Nchan = (int) Params[7]; Nthreads = blockDim.x; Nblocks = gridDim.x; tid = threadIdx.x; bid = blockIdx.x; tind = tid + bid * Nthreads; while (tind<Nspikes){ max_running = mus[tind] * mus[tind]; id[tind] = 0; my_chan = Wh[tind]; for(ind=0; ind<Nfilters; ind++) if (iMatch[my_chan + ind * Nchan]) if (cmax[tind + ind*Nspikes] < max_running){ id[tind] = ind; max_running = cmax[tind + ind*Nspikes]; } x[tind] = max_running; tind += Nblocks*Nthreads; } }
ca48910f41a73cb067f75ffc819afa5ad96785fa.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sparse_fully_connected_1x1_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "neural_network_cusparse_exception.h" #include "neural_network_cublas_exception.h" #include "neural_network_cuda_exception.h" #include "../sparse_convolution_layer.h" namespace nnforge { namespace cuda { __global__ void sparse_fully_connected_1x1_update_biases_upd_kernel( float * __restrict gradient_biases, const float * __restrict output_errors, int block_size, int output_elem_count_per_entry, int entry_count, int block_count) { int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int block_id = blockIdx.y * blockDim.y + threadIdx.y; if ((output_neuron_id < output_elem_count_per_entry) && (block_id < block_count)) { int base_entry_id = block_size * block_id; int iteration_count = min(entry_count - base_entry_id, block_size); const float * current_error = output_errors + (base_entry_id * output_elem_count_per_entry + output_neuron_id); float sum = 0.0F; for(int i = 0; i < iteration_count; ++i) { sum += *current_error; current_error += output_elem_count_per_entry; } atomicAdd(gradient_biases + output_neuron_id, sum); } } #define OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE 4 __global__ void sparse_fully_connected_1x1_backprop_upd_kernel( const float * __restrict output_errors, float * __restrict input_errors, const 
float * __restrict weights, const int * __restrict column_indices, const int * __restrict row_ptrs, int output_elem_count_per_entry, int entry_count, int entry32_block_size) { int row_id = blockIdx.y * blockDim.y + threadIdx.y; if (row_id >= output_elem_count_per_entry) return; int start_column_index = __load_nc(row_ptrs + row_id); int end_column_index = __load_nc(row_ptrs + row_id + 1); int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; int base_nnz_index = start_column_index + base_column_index_offset; if (base_nnz_index >= end_column_index) return; int max_valid_lane = min(OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE, end_column_index - base_nnz_index); bool valid[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE]; int column_ids[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE]; float w[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE]; #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i) { valid[i] = (i < max_valid_lane); int index = valid[i] ? 
base_nnz_index + i : (end_column_index - 1); column_ids[i] = __load_nc(column_indices + index); w[i] = __load_nc(weights + index); } int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size; if (base_entry_id >= entry_count) return; int lane_id = thread_id_x & 31; int current_entry_id = base_entry_id + lane_id; const float * base_output_errors = output_errors + row_id * entry_count; for(int j = 0; j < entry32_block_size; ++j, current_entry_id += 32) { if (current_entry_id < entry_count) { float output_error = __load_nc(base_output_errors + current_entry_id); #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i) if (valid[i]) atomicAdd(input_errors + column_ids[i] * entry_count + current_entry_id, output_error * w[i]); } } } #define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4 extern __shared__ float arr_sh[]; template<bool single_entry_pass> __global__ void sparse_fully_connected_1x1_update_weights_kernel( const float * __restrict output_errors, const float * __restrict input_neurons, float * __restrict gradient_weights, const int * __restrict column_indices, const int * __restrict row_ptrs, int output_elem_count_per_entry, int entry_count, int entry32_block_size) { int row_id = blockIdx.y * blockDim.y + threadIdx.y; if (row_id >= output_elem_count_per_entry) return; int start_column_index = __load_nc(row_ptrs + row_id); int end_column_index = __load_nc(row_ptrs + row_id + 1); int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BLOCK_SIZE; int base_nnz_index = start_column_index + base_column_index_offset; if (base_nnz_index >= end_column_index) return; int max_valid_lane = min(OUTPUT_ELEM_COUNT_BLOCK_SIZE, end_column_index - base_nnz_index); bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE]; #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) valid[i] = (i < max_valid_lane); int column_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE]; #pragma unroll for(int 
i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) column_ids[i] = __load_nc(column_indices + (valid[i] ? base_nnz_index + i : (end_column_index - 1))); int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size; if (base_entry_id >= entry_count) return; int lane_id = thread_id_x & 31; int current_entry_id = base_entry_id + lane_id; float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE]; #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) sums[i] = 0.0F; const float * base_output_errors = output_errors + row_id * entry_count; for(int j = 0; j < entry32_block_size; ++j, current_entry_id += 32) { if (current_entry_id < entry_count) { float output_error = __load_nc(base_output_errors + current_entry_id); #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) sums[i] += __load_nc(input_neurons + column_ids[i] * entry_count + current_entry_id) * output_error; } } #if __CUDA_ARCH__ < 300 int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x; int warp_id = thread_id >> 5; volatile float * arr = arr_sh; #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id] = sums[i]; #endif #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { #if __CUDA_ARCH__ < 300 if (lane_id < tx) { #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id] += arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id + tx]; } #else #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) sums[i] += __shfl_xor(sums[i], tx); #endif } #if __CUDA_ARCH__ < 300 if (lane_id < max_valid_lane) sums[0] = arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + lane_id * 32]; #else #pragma unroll for(int i = 1; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) if (lane_id == i) sums[0] = sums[i]; #endif if (lane_id < max_valid_lane) { if (single_entry_pass) { 
gradient_weights[base_nnz_index + lane_id] += sums[0]; } else { atomicAdd(gradient_weights + base_nnz_index + lane_id, sums[0]); } } } sparse_fully_connected_1x1_layer_updater_cuda::sparse_fully_connected_1x1_layer_updater_cuda() { } sparse_fully_connected_1x1_layer_updater_cuda::~sparse_fully_connected_1x1_layer_updater_cuda() { } void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { // Copy bias cuda_util::duplicate_vector( *cuda_config, *data[1], *output_neurons_buffer, output_elem_count_per_entry, entry_count, stream_id); cusparse_safe_call(hipsparseSetStream(cuda_config->get_cusparse_handle(), stream_id)); float alpha = 1.0F; float beta = 1.0F; hipsparseMatDescr_t mat_descr; cusparse_safe_call(hipsparseCreateMatDescr(&mat_descr)); cusparse_safe_call(hipsparseScsrmm( cuda_config->get_cusparse_handle(), HIPSPARSE_OPERATION_NON_TRANSPOSE, output_elem_count_per_entry, entry_count, input_elem_count_per_entry, feature_map_connection_count, &alpha, mat_descr, *data[0], *data_custom[1], *data_custom[0], (const float *)(*input_neurons_buffer) + input_elem_count_per_entry * offset_input_entry_id, input_elem_count_per_entry, &beta, *output_neurons_buffer, output_elem_count_per_entry)); } void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const 
std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { // Too slow /* cusparse_safe_call(hipsparseSetStream(cuda_config->get_cusparse_handle(), stream_id)); float alpha = 1.0F; float beta = 0.0F; hipsparseMatDescr_t mat_descr; cusparse_safe_call(hipsparseCreateMatDescr(&mat_descr)); cusparse_safe_call(hipsparseScsrmm( cuda_config->get_cusparse_handle(), HIPSPARSE_OPERATION_TRANSPOSE, output_elem_count_per_entry, entry_count, input_elem_count_per_entry, feature_map_connection_count, &alpha, mat_descr, *data[0], *data_custom[1], *data_custom[0], *output_errors_buffer, output_elem_count_per_entry, &beta, *input_errors_buffer, input_elem_count_per_entry)); */ cuda_util::set_with_value( *cuda_config, *additional_buffers[0], 0.0F, input_elem_count_per_entry * entry_count, stream_id); std::pair<int, int> entry32_block_size_and_count = get_entry32_backprop_block_size_and_count(entry_count); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, 32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE), output_elem_count_per_entry, entry32_block_size_and_count.second, 32); hipLaunchKernelGGL(( sparse_fully_connected_1x1_backprop_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *additional_buffers[1], *additional_buffers[0], *data[0], *data_custom[0], *data_custom[1], output_elem_count_per_entry, entry_count, entry32_block_size_and_count.first); cublas_safe_call(hipblasSetStream(cuda_config->get_cublas_handle(), 
stream_id)); // transpose input { float alpha = 1.0F; float beta = 0.0F; cublas_safe_call(hipblasSgeam( cuda_config->get_cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, input_elem_count_per_entry, entry_count, &alpha, *additional_buffers[0], entry_count, &beta, *input_errors_buffer, input_elem_count_per_entry, *input_errors_buffer, input_elem_count_per_entry)); } } void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_update_weights( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<cuda_linear_buffer_device_smart_ptr>& gradient, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, cuda_linear_buffer_device_smart_ptr output_errors_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { // Update weights { cublas_safe_call(hipblasSetStream(cuda_config->get_cublas_handle(), stream_id)); // transpose input { float alpha = 1.0F; float beta = 0.0F; cublas_safe_call(hipblasSgeam( cuda_config->get_cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, entry_count, input_elem_count_per_entry, &alpha, (const float *)(*input_neurons_buffer) + input_elem_count_per_entry * offset_input_entry_id, input_elem_count_per_entry, &beta, *additional_buffers[0], entry_count, *additional_buffers[0], entry_count)); } // transpose output { float alpha = 1.0F; float beta = 0.0F; cublas_safe_call(hipblasSgeam( cuda_config->get_cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, entry_count, output_elem_count_per_entry, &alpha, *output_errors_buffer, output_elem_count_per_entry, &beta, *additional_buffers[1], entry_count, *additional_buffers[1], entry_count)); } std::pair<int, int> entry32_block_size_and_count = get_entry32_update_block_size_and_count(entry_count); std::pair<dim3, dim3> kernel_dims = 
cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, 32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE), output_elem_count_per_entry, entry32_block_size_and_count.second, 32); int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z; int smem_size = (cuda_config->get_compute_capability() < 300) ? threadblock_size * OUTPUT_ELEM_COUNT_BLOCK_SIZE * sizeof(float) : 0; if (entry32_block_size_and_count.second > 1) { hipLaunchKernelGGL(( sparse_fully_connected_1x1_update_weights_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id, *additional_buffers[1], *additional_buffers[0], *gradient[0], *data_custom[0], *data_custom[1], output_elem_count_per_entry, entry_count, entry32_block_size_and_count.first); } else { hipLaunchKernelGGL(( sparse_fully_connected_1x1_update_weights_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id, *additional_buffers[1], *additional_buffers[0], *gradient[0], *data_custom[0], *data_custom[1], output_elem_count_per_entry, entry_count, entry32_block_size_and_count.first); } } // Update biases { int block_size = get_block_size(entry_count); int block_count = (entry_count + block_size - 1) / block_size; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_entry, block_count, 1); hipLaunchKernelGGL(( sparse_fully_connected_1x1_update_biases_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *gradient[1], *output_errors_buffer, block_size, output_elem_count_per_entry, entry_count, block_count); } } bool sparse_fully_connected_1x1_layer_updater_cuda::is_in_place_backprop() const { return false; } int sparse_fully_connected_1x1_layer_updater_cuda::get_block_size(int entry_count) { int block_size = 
std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count); return block_size; } void sparse_fully_connected_1x1_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema); feature_map_connection_count = layer_derived->feature_map_connection_count; int input_data_single_update_32block_entry_size = input_elem_count_per_entry * 32 * sizeof(float); max_entry32_update_block_size = ::max(1, cuda_config->l2_cache_size / 2 / input_data_single_update_32block_entry_size); int input_data_single_backprop_32block_entry_size = input_elem_count_per_entry * 32 * sizeof(float); max_entry32_backprop_block_size = ::max(1, cuda_config->l2_cache_size / 2 / input_data_single_backprop_32block_entry_size); } std::vector<size_t> sparse_fully_connected_1x1_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const { std::vector<size_t> res; res.push_back(input_elem_count_per_entry * sizeof(float)); res.push_back(output_elem_count_per_entry * sizeof(float)); return res; } void sparse_fully_connected_1x1_layer_updater_cuda::notify_data_custom(const_layer_data_custom_smart_ptr host_data_custom) { max_column_index_count_per_row = 0; const std::vector<int>& row_indices = host_data_custom->at(1); for(int i = 0; i < row_indices.size() - 1; ++i) max_column_index_count_per_row = ::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]); } std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_update_block_size_and_count(unsigned int entry_count) const { int candidate_block_size = (entry_count + 32 - 1) / 32; if (candidate_block_size <= max_entry32_update_block_size) return std::make_pair(candidate_block_size, 1); int candidate_block_count2 = (candidate_block_size + max_entry32_update_block_size - 1) / max_entry32_update_block_size; int candidate_block_size2 = (candidate_block_size 
+ candidate_block_count2 - 1) / candidate_block_count2; return std::make_pair(candidate_block_size2, candidate_block_count2); } std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_backprop_block_size_and_count(unsigned int entry_count) const { int candidate_block_size = (entry_count + 32 - 1) / 32; if (candidate_block_size <= max_entry32_backprop_block_size) return std::make_pair(candidate_block_size, 1); int candidate_block_count2 = (candidate_block_size + max_entry32_backprop_block_size - 1) / max_entry32_backprop_block_size; int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2; return std::make_pair(candidate_block_size2, candidate_block_count2); } } }
ca48910f41a73cb067f75ffc819afa5ad96785fa.cu
/* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sparse_fully_connected_1x1_layer_updater_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "neural_network_cusparse_exception.h" #include "neural_network_cublas_exception.h" #include "neural_network_cuda_exception.h" #include "../sparse_convolution_layer.h" namespace nnforge { namespace cuda { __global__ void sparse_fully_connected_1x1_update_biases_upd_kernel( float * __restrict gradient_biases, const float * __restrict output_errors, int block_size, int output_elem_count_per_entry, int entry_count, int block_count) { int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int block_id = blockIdx.y * blockDim.y + threadIdx.y; if ((output_neuron_id < output_elem_count_per_entry) && (block_id < block_count)) { int base_entry_id = block_size * block_id; int iteration_count = min(entry_count - base_entry_id, block_size); const float * current_error = output_errors + (base_entry_id * output_elem_count_per_entry + output_neuron_id); float sum = 0.0F; for(int i = 0; i < iteration_count; ++i) { sum += *current_error; current_error += output_elem_count_per_entry; } atomicAdd(gradient_biases + output_neuron_id, sum); } } #define OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE 4 __global__ void sparse_fully_connected_1x1_backprop_upd_kernel( const float * __restrict output_errors, float * __restrict input_errors, const float * __restrict weights, const int * __restrict 
column_indices, const int * __restrict row_ptrs, int output_elem_count_per_entry, int entry_count, int entry32_block_size) { int row_id = blockIdx.y * blockDim.y + threadIdx.y; if (row_id >= output_elem_count_per_entry) return; int start_column_index = __load_nc(row_ptrs + row_id); int end_column_index = __load_nc(row_ptrs + row_id + 1); int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; int base_nnz_index = start_column_index + base_column_index_offset; if (base_nnz_index >= end_column_index) return; int max_valid_lane = min(OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE, end_column_index - base_nnz_index); bool valid[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE]; int column_ids[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE]; float w[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE]; #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i) { valid[i] = (i < max_valid_lane); int index = valid[i] ? base_nnz_index + i : (end_column_index - 1); column_ids[i] = __load_nc(column_indices + index); w[i] = __load_nc(weights + index); } int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size; if (base_entry_id >= entry_count) return; int lane_id = thread_id_x & 31; int current_entry_id = base_entry_id + lane_id; const float * base_output_errors = output_errors + row_id * entry_count; for(int j = 0; j < entry32_block_size; ++j, current_entry_id += 32) { if (current_entry_id < entry_count) { float output_error = __load_nc(base_output_errors + current_entry_id); #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i) if (valid[i]) atomicAdd(input_errors + column_ids[i] * entry_count + current_entry_id, output_error * w[i]); } } } #define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4 extern __shared__ float arr_sh[]; template<bool single_entry_pass> __global__ void sparse_fully_connected_1x1_update_weights_kernel( const float * __restrict output_errors, 
const float * __restrict input_neurons, float * __restrict gradient_weights, const int * __restrict column_indices, const int * __restrict row_ptrs, int output_elem_count_per_entry, int entry_count, int entry32_block_size) { int row_id = blockIdx.y * blockDim.y + threadIdx.y; if (row_id >= output_elem_count_per_entry) return; int start_column_index = __load_nc(row_ptrs + row_id); int end_column_index = __load_nc(row_ptrs + row_id + 1); int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BLOCK_SIZE; int base_nnz_index = start_column_index + base_column_index_offset; if (base_nnz_index >= end_column_index) return; int max_valid_lane = min(OUTPUT_ELEM_COUNT_BLOCK_SIZE, end_column_index - base_nnz_index); bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE]; #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) valid[i] = (i < max_valid_lane); int column_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE]; #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) column_ids[i] = __load_nc(column_indices + (valid[i] ? 
base_nnz_index + i : (end_column_index - 1))); int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size; if (base_entry_id >= entry_count) return; int lane_id = thread_id_x & 31; int current_entry_id = base_entry_id + lane_id; float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE]; #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) sums[i] = 0.0F; const float * base_output_errors = output_errors + row_id * entry_count; for(int j = 0; j < entry32_block_size; ++j, current_entry_id += 32) { if (current_entry_id < entry_count) { float output_error = __load_nc(base_output_errors + current_entry_id); #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) sums[i] += __load_nc(input_neurons + column_ids[i] * entry_count + current_entry_id) * output_error; } } #if __CUDA_ARCH__ < 300 int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x; int warp_id = thread_id >> 5; volatile float * arr = arr_sh; #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id] = sums[i]; #endif #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { #if __CUDA_ARCH__ < 300 if (lane_id < tx) { #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id] += arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id + tx]; } #else #pragma unroll for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) sums[i] += __shfl_xor(sums[i], tx); #endif } #if __CUDA_ARCH__ < 300 if (lane_id < max_valid_lane) sums[0] = arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + lane_id * 32]; #else #pragma unroll for(int i = 1; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i) if (lane_id == i) sums[0] = sums[i]; #endif if (lane_id < max_valid_lane) { if (single_entry_pass) { gradient_weights[base_nnz_index + lane_id] += sums[0]; } else { atomicAdd(gradient_weights + base_nnz_index + lane_id, 
sums[0]); } } } sparse_fully_connected_1x1_layer_updater_cuda::sparse_fully_connected_1x1_layer_updater_cuda() { } sparse_fully_connected_1x1_layer_updater_cuda::~sparse_fully_connected_1x1_layer_updater_cuda() { } void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { // Copy bias cuda_util::duplicate_vector( *cuda_config, *data[1], *output_neurons_buffer, output_elem_count_per_entry, entry_count, stream_id); cusparse_safe_call(cusparseSetStream(cuda_config->get_cusparse_handle(), stream_id)); float alpha = 1.0F; float beta = 1.0F; cusparseMatDescr_t mat_descr; cusparse_safe_call(cusparseCreateMatDescr(&mat_descr)); cusparse_safe_call(cusparseScsrmm( cuda_config->get_cusparse_handle(), CUSPARSE_OPERATION_NON_TRANSPOSE, output_elem_count_per_entry, entry_count, input_elem_count_per_entry, feature_map_connection_count, &alpha, mat_descr, *data[0], *data_custom[1], *data_custom[0], (const float *)(*input_neurons_buffer) + input_elem_count_per_entry * offset_input_entry_id, input_elem_count_per_entry, &beta, *output_neurons_buffer, output_elem_count_per_entry)); } void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr 
output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { // Too slow /* cusparse_safe_call(cusparseSetStream(cuda_config->get_cusparse_handle(), stream_id)); float alpha = 1.0F; float beta = 0.0F; cusparseMatDescr_t mat_descr; cusparse_safe_call(cusparseCreateMatDescr(&mat_descr)); cusparse_safe_call(cusparseScsrmm( cuda_config->get_cusparse_handle(), CUSPARSE_OPERATION_TRANSPOSE, output_elem_count_per_entry, entry_count, input_elem_count_per_entry, feature_map_connection_count, &alpha, mat_descr, *data[0], *data_custom[1], *data_custom[0], *output_errors_buffer, output_elem_count_per_entry, &beta, *input_errors_buffer, input_elem_count_per_entry)); */ cuda_util::set_with_value( *cuda_config, *additional_buffers[0], 0.0F, input_elem_count_per_entry * entry_count, stream_id); std::pair<int, int> entry32_block_size_and_count = get_entry32_backprop_block_size_and_count(entry_count); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, 32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE), output_elem_count_per_entry, entry32_block_size_and_count.second, 32); sparse_fully_connected_1x1_backprop_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *additional_buffers[1], *additional_buffers[0], *data[0], *data_custom[0], *data_custom[1], output_elem_count_per_entry, entry_count, entry32_block_size_and_count.first); cublas_safe_call(cublasSetStream(cuda_config->get_cublas_handle(), stream_id)); // transpose input { float alpha = 1.0F; float beta = 0.0F; cublas_safe_call(cublasSgeam( cuda_config->get_cublas_handle(), 
CUBLAS_OP_T, CUBLAS_OP_N, input_elem_count_per_entry, entry_count, &alpha, *additional_buffers[0], entry_count, &beta, *input_errors_buffer, input_elem_count_per_entry, *input_errors_buffer, input_elem_count_per_entry)); } } void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_update_weights( unsigned int offset_input_entry_id, cudaStream_t stream_id, const std::vector<cuda_linear_buffer_device_smart_ptr>& gradient, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, cuda_linear_buffer_device_smart_ptr output_errors_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { // Update weights { cublas_safe_call(cublasSetStream(cuda_config->get_cublas_handle(), stream_id)); // transpose input { float alpha = 1.0F; float beta = 0.0F; cublas_safe_call(cublasSgeam( cuda_config->get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, entry_count, input_elem_count_per_entry, &alpha, (const float *)(*input_neurons_buffer) + input_elem_count_per_entry * offset_input_entry_id, input_elem_count_per_entry, &beta, *additional_buffers[0], entry_count, *additional_buffers[0], entry_count)); } // transpose output { float alpha = 1.0F; float beta = 0.0F; cublas_safe_call(cublasSgeam( cuda_config->get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, entry_count, output_elem_count_per_entry, &alpha, *output_errors_buffer, output_elem_count_per_entry, &beta, *additional_buffers[1], entry_count, *additional_buffers[1], entry_count)); } std::pair<int, int> entry32_block_size_and_count = get_entry32_update_block_size_and_count(entry_count); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, 32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / 
OUTPUT_ELEM_COUNT_BLOCK_SIZE), output_elem_count_per_entry, entry32_block_size_and_count.second, 32); int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z; int smem_size = (cuda_config->get_compute_capability() < 300) ? threadblock_size * OUTPUT_ELEM_COUNT_BLOCK_SIZE * sizeof(float) : 0; if (entry32_block_size_and_count.second > 1) { sparse_fully_connected_1x1_update_weights_kernel<false><<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>( *additional_buffers[1], *additional_buffers[0], *gradient[0], *data_custom[0], *data_custom[1], output_elem_count_per_entry, entry_count, entry32_block_size_and_count.first); } else { sparse_fully_connected_1x1_update_weights_kernel<true><<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>( *additional_buffers[1], *additional_buffers[0], *gradient[0], *data_custom[0], *data_custom[1], output_elem_count_per_entry, entry_count, entry32_block_size_and_count.first); } } // Update biases { int block_size = get_block_size(entry_count); int block_count = (entry_count + block_size - 1) / block_size; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_entry, block_count, 1); sparse_fully_connected_1x1_update_biases_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *gradient[1], *output_errors_buffer, block_size, output_elem_count_per_entry, entry_count, block_count); } } bool sparse_fully_connected_1x1_layer_updater_cuda::is_in_place_backprop() const { return false; } int sparse_fully_connected_1x1_layer_updater_cuda::get_block_size(int entry_count) { int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count); return block_size; } void sparse_fully_connected_1x1_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const 
sparse_convolution_layer>(layer_schema); feature_map_connection_count = layer_derived->feature_map_connection_count; int input_data_single_update_32block_entry_size = input_elem_count_per_entry * 32 * sizeof(float); max_entry32_update_block_size = std::max(1, cuda_config->l2_cache_size / 2 / input_data_single_update_32block_entry_size); int input_data_single_backprop_32block_entry_size = input_elem_count_per_entry * 32 * sizeof(float); max_entry32_backprop_block_size = std::max(1, cuda_config->l2_cache_size / 2 / input_data_single_backprop_32block_entry_size); } std::vector<size_t> sparse_fully_connected_1x1_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const { std::vector<size_t> res; res.push_back(input_elem_count_per_entry * sizeof(float)); res.push_back(output_elem_count_per_entry * sizeof(float)); return res; } void sparse_fully_connected_1x1_layer_updater_cuda::notify_data_custom(const_layer_data_custom_smart_ptr host_data_custom) { max_column_index_count_per_row = 0; const std::vector<int>& row_indices = host_data_custom->at(1); for(int i = 0; i < row_indices.size() - 1; ++i) max_column_index_count_per_row = std::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]); } std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_update_block_size_and_count(unsigned int entry_count) const { int candidate_block_size = (entry_count + 32 - 1) / 32; if (candidate_block_size <= max_entry32_update_block_size) return std::make_pair(candidate_block_size, 1); int candidate_block_count2 = (candidate_block_size + max_entry32_update_block_size - 1) / max_entry32_update_block_size; int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2; return std::make_pair(candidate_block_size2, candidate_block_count2); } std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_backprop_block_size_and_count(unsigned int entry_count) const { int 
candidate_block_size = (entry_count + 32 - 1) / 32; if (candidate_block_size <= max_entry32_backprop_block_size) return std::make_pair(candidate_block_size, 1); int candidate_block_count2 = (candidate_block_size + max_entry32_backprop_block_size - 1) / max_entry32_backprop_block_size; int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2; return std::make_pair(candidate_block_size2, candidate_block_count2); } } }
715cb6f023c949e0b185656e284ea7a5f5972222.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void DummyFunction(int *result) { int x = blockIdx.x; result[x] = ((result[x] * result[x]) * 1); }
715cb6f023c949e0b185656e284ea7a5f5972222.cu
extern "C" __global__ void DummyFunction(int *result) { int x = blockIdx.x; result[x] = ((result[x] * result[x]) * 1); }
6dce5c2b38b04024089704f54cc03f65176bc707.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by root on 2020/11/30. // #include "kernel_hip.cuh" #include "stdio.h" #include <helper_math.h> #define TPB 64 __global__ void centroidKernel(uchar4 *d_img, int *d_centroidCol, int *d_centroidRow, int *d_pixelCount, int width, int height) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int s_idx = threadIdx.x; int row = idx / width; int col = idx % width; __shared__ uint4 s_img[TPB]; if ((d_img[idx].x < 255 || d_img[idx].y < 255 || d_img[idx].z < 255) && idx < width * height) { s_img[s_idx].x = col; s_img[s_idx].y = row; s_img[s_idx].z = 1; } else { s_img[s_idx].x = 0; s_img[s_idx].y = 0; s_img[s_idx].z = 0; } __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (s_idx < s) { s_img[s_idx] += s_img[s_idx + s]; __syncthreads(); } } if (s_idx == 0) { atomicAdd(d_centroidCol, s_img[0].x); atomicAdd(d_centroidRow, s_img[0].y); atomicAdd(d_pixelCount, s_img[0].z); } } void centroidParallel(uchar4 *img, int width, int height) { uchar4 *d_img = 0; int *d_centroidRow = 0, *d_centroidCol = 0, *d_pixelCount = 0; int centroidRow = 0, centroidCol = 0, pixelCount = 0; hipMalloc(&d_img, width * height * sizeof(uchar4)); hipMemcpy(d_img, img, width * height * sizeof(uchar4), hipMemcpyHostToDevice); hipMalloc(&d_centroidRow, sizeof(int )); hipMalloc(&d_centroidCol, sizeof(int )); hipMalloc(&d_pixelCount, sizeof(int )); hipMemset(d_centroidRow, 0, sizeof(int )); hipMemset(d_centroidCol, 0, sizeof(int )); hipMemset(d_pixelCount, 0, sizeof(int )); hipLaunchKernelGGL(( centroidKernel), dim3((width * height + TPB - 1) / TPB), dim3(TPB), 0, 0, d_img, d_centroidCol, d_centroidRow, d_pixelCount, width, height); hipMemcpy(&centroidCol, d_centroidCol, sizeof(int ), hipMemcpyDeviceToHost); hipMemcpy(&centroidRow, d_centroidRow, sizeof(int ), hipMemcpyDeviceToHost); hipMemcpy(&pixelCount, d_pixelCount, sizeof(int ), hipMemcpyDeviceToHost); centroidCol /= pixelCount; 
centroidRow /= pixelCount; printf("Centroid: (col: %d, row: %d) based on %d pixels\n", centroidCol, centroidRow, pixelCount); for (int col = 0; col < width; col++) { img[centroidRow * width + col].x = 255; img[centroidRow * width + col].y = 0; img[centroidRow * width + col].z = 0; } for (int row = 0; row < height; row++) { img[row * width + centroidCol].x = 255; img[row * width + centroidCol].y = 0; img[row * width + centroidCol].z = 0; } hipFree(d_img); hipFree(d_centroidRow); hipFree(d_centroidCol); hipFree(d_pixelCount); }
6dce5c2b38b04024089704f54cc03f65176bc707.cu
// // Created by root on 2020/11/30. // #include "kernel.cuh" #include "stdio.h" #include <helper_math.h> #define TPB 64 __global__ void centroidKernel(uchar4 *d_img, int *d_centroidCol, int *d_centroidRow, int *d_pixelCount, int width, int height) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int s_idx = threadIdx.x; int row = idx / width; int col = idx % width; __shared__ uint4 s_img[TPB]; if ((d_img[idx].x < 255 || d_img[idx].y < 255 || d_img[idx].z < 255) && idx < width * height) { s_img[s_idx].x = col; s_img[s_idx].y = row; s_img[s_idx].z = 1; } else { s_img[s_idx].x = 0; s_img[s_idx].y = 0; s_img[s_idx].z = 0; } __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (s_idx < s) { s_img[s_idx] += s_img[s_idx + s]; __syncthreads(); } } if (s_idx == 0) { atomicAdd(d_centroidCol, s_img[0].x); atomicAdd(d_centroidRow, s_img[0].y); atomicAdd(d_pixelCount, s_img[0].z); } } void centroidParallel(uchar4 *img, int width, int height) { uchar4 *d_img = 0; int *d_centroidRow = 0, *d_centroidCol = 0, *d_pixelCount = 0; int centroidRow = 0, centroidCol = 0, pixelCount = 0; cudaMalloc(&d_img, width * height * sizeof(uchar4)); cudaMemcpy(d_img, img, width * height * sizeof(uchar4), cudaMemcpyHostToDevice); cudaMalloc(&d_centroidRow, sizeof(int )); cudaMalloc(&d_centroidCol, sizeof(int )); cudaMalloc(&d_pixelCount, sizeof(int )); cudaMemset(d_centroidRow, 0, sizeof(int )); cudaMemset(d_centroidCol, 0, sizeof(int )); cudaMemset(d_pixelCount, 0, sizeof(int )); centroidKernel<<<(width * height + TPB - 1) / TPB, TPB>>>(d_img, d_centroidCol, d_centroidRow, d_pixelCount, width, height); cudaMemcpy(&centroidCol, d_centroidCol, sizeof(int ), cudaMemcpyDeviceToHost); cudaMemcpy(&centroidRow, d_centroidRow, sizeof(int ), cudaMemcpyDeviceToHost); cudaMemcpy(&pixelCount, d_pixelCount, sizeof(int ), cudaMemcpyDeviceToHost); centroidCol /= pixelCount; centroidRow /= pixelCount; printf("Centroid: (col: %d, row: %d) based on %d pixels\n", centroidCol, centroidRow, 
pixelCount); for (int col = 0; col < width; col++) { img[centroidRow * width + col].x = 255; img[centroidRow * width + col].y = 0; img[centroidRow * width + col].z = 0; } for (int row = 0; row < height; row++) { img[row * width + centroidCol].x = 255; img[row * width + centroidCol].y = 0; img[row * width + centroidCol].z = 0; } cudaFree(d_img); cudaFree(d_centroidRow); cudaFree(d_centroidCol); cudaFree(d_pixelCount); }
788b62140ae71c3c055ecd451e3c174e46ea41a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdint.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/sort.h> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { hipEvent_t start; hipEvent_t stop; GpuTimer() { hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); } void Start() { hipEventRecord(start, 0); hipEventSynchronize(start); } void Stop() { hipEventRecord(stop, 0); } float Elapsed() { float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; __global__ void computeHistKernel2(int * src, int n, int * hist, int nBins, int bit) { // TODO // Each block computes its local hist using atomic on SMEM extern __shared__ int s[]; for(int i = threadIdx.x; i < nBins; i += blockDim.x) s[i] = 0; __syncthreads(); int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { int bin = (src[i] >> bit) & (nBins -1); atomicAdd(&s[bin], 1); } __syncthreads(); // Each block adds its local hist to global hist using atomic on GMEM for(int i = threadIdx.x; i < nBins; i += blockDim.x) atomicAdd(&hist[i], s[i]); } // (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort // Assume: nBits (k in slides) in {1, 2, 4, 8, 16} // Why "int * blockSizes"? 
// Because we may want different block sizes for diffrent kernels: // blockSizes[0] for the histogram kernel // blockSizes[1] for the scan kernel void sortParallel(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes) { // TODO int nBins = 1 << nBits; // 2^nBits int * hist = (int *)malloc(nBins * sizeof(int)); int * histScan = (int *)malloc(nBins * sizeof(int)); // In each counting sort, we sort data in "src" and write result to "dst" // Then, we swap these 2 pointers and go to the next counting sort // At first, we assign "src = in" and "dest = out" // However, the data pointed by "in" is read-only // --> we create a copy of this data and assign "src" to the address of this copy uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t * originalSrc = src; // Use originalSrc to free memory later uint32_t * dst = out; uint32_t * temp; dim3 blockSize1(blockSizes[0]); dim3 blockSize2(blockSizes[1]); // Allocate device memories int * d_hist, *d_histScan, * d_in; CHECK(hipMalloc(&d_in, n * sizeof(int))); CHECK(hipMalloc(&d_hist, nBins * sizeof(int))); CHECK(hipMalloc(&d_histScan, nBins * sizeof(int))); // Call kernel dim3 gridSize1((n - 1) / blockSize1.x + 1); dim3 gridSize2((n - 1) / blockSize2.x + 1); size_t smemSize = nBins*sizeof(int); int *d_blkSums; CHECK(hipMalloc(&d_blkSums, gridSize2.x * sizeof(int))); // Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit) // (Each digit consists of nBits bits) // In each loop, sort elements according to the current digit // (using STABLE counting sort) for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit CHECK(hipMemcpy(d_in, src, n * sizeof(int), hipMemcpyHostToDevice)); CHECK(hipMemset(d_hist, 0, nBins * sizeof(int))); hipLaunchKernelGGL(( computeHistKernel2), dim3(gridSize1), dim3(blockSize1), smemSize, 0, d_in, n, d_hist, nBins, bit); CHECK(hipMemcpy(hist, d_hist, nBins * 
sizeof(int), hipMemcpyDeviceToHost)); // TODO: Scan "hist" (exclusively) and save the result to "histScan" histScan[0] = 0; for(int i = 1; i < nBins; i++) histScan[i] = histScan[i - 1] + hist[i - 1]; // TODO: From "histScan", scatter elements in "src" to correct locations in "dst" for(int i = 0; i < n; i++) { int bin = (src[i] >> bit) & (nBins -1); dst[histScan[bin]] = src[i]; histScan[bin]++; } // TODO: Swap "src" and "dst" temp = src; src = dst; dst = temp; } // TODO: Copy result to "out" memcpy(out, src, n * sizeof(uint32_t)); // Free memories free(hist); free(histScan); free(originalSrc); // Free device memories CHECK(hipFree(d_in)); CHECK(hipFree(d_hist)); CHECK(hipFree(d_histScan)) CHECK(hipFree(d_blkSums)); } // (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort // Assume: nBits (k in slides) in {1, 2, 4, 8, 16} // Why "int * blockSizes"? // Because we may want different block sizes for diffrent kernels: // blockSizes[0] for the histogram kernel // blockSizes[1] for the scan kernel void sortByDevice(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes) { // TODO thrust::device_vector<uint32_t> dv_out(in, in + n); thrust::sort(dv_out.begin(), dv_out.end()); thrust::copy(dv_out.begin(), dv_out.end(), out); } // Radix sort void sort(const uint32_t * in, int n, uint32_t * out, int nBits, bool useDevice=false, int * blockSizes=NULL) { GpuTimer timer; timer.Start(); if (useDevice == false) { printf("\nRadix sort parallel scan hist\n"); sortParallel(in, n, out, nBits, blockSizes); } else // use device { printf("\nRadix sort by device\n"); sortByDevice(in, n, out, nBits, blockSizes); } timer.Stop(); printf("Time: %.3f ms\n", timer.Elapsed()); } void printDeviceInfo() { hipDeviceProp_t devProv; CHECK(hipGetDeviceProperties(&devProv, 0)); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: 
%d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } void printArray(uint32_t * a, int n) { for (int i = 0; i < n; i++) printf("%i ", a[i]); printf("\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 20); //n = 10; printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(uint32_t); uint32_t * in = (uint32_t *)malloc(bytes); uint32_t * out = (uint32_t *)malloc(bytes); // Device result uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = rand(); //printArray(in, n); // SET UP NBITS int nBits = 4; // Default if (argc > 1) nBits = atoi(argv[1]); printf("\nNum bits per digit: %d\n", nBits); // DETERMINE BLOCK SIZES int blockSizes[2] = {512, 512}; // One for histogram, one for scan if (argc == 4) { blockSizes[0] = atoi(argv[2]); blockSizes[1] = atoi(argv[3]); } printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]); // SORT BY HOST sort(in, n, correctOut, nBits, false, blockSizes); //printArray(correctOut, n); // SORT BY DEVICE sort(in, n, out, nBits, true, blockSizes); checkCorrectness(out, correctOut, n); // FREE MEMORIES free(in); free(out); free(correctOut); return EXIT_SUCCESS; }
788b62140ae71c3c055ecd451e3c174e46ea41a4.cu
#include <stdio.h> #include <stdint.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/sort.h> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { cudaEvent_t start; cudaEvent_t stop; GpuTimer() { cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); } void Start() { cudaEventRecord(start, 0); cudaEventSynchronize(start); } void Stop() { cudaEventRecord(stop, 0); } float Elapsed() { float elapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); return elapsed; } }; __global__ void computeHistKernel2(int * src, int n, int * hist, int nBins, int bit) { // TODO // Each block computes its local hist using atomic on SMEM extern __shared__ int s[]; for(int i = threadIdx.x; i < nBins; i += blockDim.x) s[i] = 0; __syncthreads(); int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { int bin = (src[i] >> bit) & (nBins -1); atomicAdd(&s[bin], 1); } __syncthreads(); // Each block adds its local hist to global hist using atomic on GMEM for(int i = threadIdx.x; i < nBins; i += blockDim.x) atomicAdd(&hist[i], s[i]); } // (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort // Assume: nBits (k in slides) in {1, 2, 4, 8, 16} // Why "int * blockSizes"? 
// Because we may want different block sizes for diffrent kernels: // blockSizes[0] for the histogram kernel // blockSizes[1] for the scan kernel void sortParallel(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes) { // TODO int nBins = 1 << nBits; // 2^nBits int * hist = (int *)malloc(nBins * sizeof(int)); int * histScan = (int *)malloc(nBins * sizeof(int)); // In each counting sort, we sort data in "src" and write result to "dst" // Then, we swap these 2 pointers and go to the next counting sort // At first, we assign "src = in" and "dest = out" // However, the data pointed by "in" is read-only // --> we create a copy of this data and assign "src" to the address of this copy uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t * originalSrc = src; // Use originalSrc to free memory later uint32_t * dst = out; uint32_t * temp; dim3 blockSize1(blockSizes[0]); dim3 blockSize2(blockSizes[1]); // Allocate device memories int * d_hist, *d_histScan, * d_in; CHECK(cudaMalloc(&d_in, n * sizeof(int))); CHECK(cudaMalloc(&d_hist, nBins * sizeof(int))); CHECK(cudaMalloc(&d_histScan, nBins * sizeof(int))); // Call kernel dim3 gridSize1((n - 1) / blockSize1.x + 1); dim3 gridSize2((n - 1) / blockSize2.x + 1); size_t smemSize = nBins*sizeof(int); int *d_blkSums; CHECK(cudaMalloc(&d_blkSums, gridSize2.x * sizeof(int))); // Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit) // (Each digit consists of nBits bits) // In each loop, sort elements according to the current digit // (using STABLE counting sort) for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit CHECK(cudaMemcpy(d_in, src, n * sizeof(int), cudaMemcpyHostToDevice)); CHECK(cudaMemset(d_hist, 0, nBins * sizeof(int))); computeHistKernel2<<<gridSize1, blockSize1, smemSize>>>(d_in, n, d_hist, nBins, bit); CHECK(cudaMemcpy(hist, d_hist, nBins * sizeof(int), 
cudaMemcpyDeviceToHost)); // TODO: Scan "hist" (exclusively) and save the result to "histScan" histScan[0] = 0; for(int i = 1; i < nBins; i++) histScan[i] = histScan[i - 1] + hist[i - 1]; // TODO: From "histScan", scatter elements in "src" to correct locations in "dst" for(int i = 0; i < n; i++) { int bin = (src[i] >> bit) & (nBins -1); dst[histScan[bin]] = src[i]; histScan[bin]++; } // TODO: Swap "src" and "dst" temp = src; src = dst; dst = temp; } // TODO: Copy result to "out" memcpy(out, src, n * sizeof(uint32_t)); // Free memories free(hist); free(histScan); free(originalSrc); // Free device memories CHECK(cudaFree(d_in)); CHECK(cudaFree(d_hist)); CHECK(cudaFree(d_histScan)) CHECK(cudaFree(d_blkSums)); } // (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort // Assume: nBits (k in slides) in {1, 2, 4, 8, 16} // Why "int * blockSizes"? // Because we may want different block sizes for diffrent kernels: // blockSizes[0] for the histogram kernel // blockSizes[1] for the scan kernel void sortByDevice(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes) { // TODO thrust::device_vector<uint32_t> dv_out(in, in + n); thrust::sort(dv_out.begin(), dv_out.end()); thrust::copy(dv_out.begin(), dv_out.end(), out); } // Radix sort void sort(const uint32_t * in, int n, uint32_t * out, int nBits, bool useDevice=false, int * blockSizes=NULL) { GpuTimer timer; timer.Start(); if (useDevice == false) { printf("\nRadix sort parallel scan hist\n"); sortParallel(in, n, out, nBits, blockSizes); } else // use device { printf("\nRadix sort by device\n"); sortByDevice(in, n, out, nBits, blockSizes); } timer.Stop(); printf("Time: %.3f ms\n", timer.Elapsed()); } void printDeviceInfo() { cudaDeviceProp devProv; CHECK(cudaGetDeviceProperties(&devProv, 0)); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", 
devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } void printArray(uint32_t * a, int n) { for (int i = 0; i < n; i++) printf("%i ", a[i]); printf("\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 20); //n = 10; printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(uint32_t); uint32_t * in = (uint32_t *)malloc(bytes); uint32_t * out = (uint32_t *)malloc(bytes); // Device result uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = rand(); //printArray(in, n); // SET UP NBITS int nBits = 4; // Default if (argc > 1) nBits = atoi(argv[1]); printf("\nNum bits per digit: %d\n", nBits); // DETERMINE BLOCK SIZES int blockSizes[2] = {512, 512}; // One for histogram, one for scan if (argc == 4) { blockSizes[0] = atoi(argv[2]); blockSizes[1] = atoi(argv[3]); } printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]); // SORT BY HOST sort(in, n, correctOut, nBits, false, blockSizes); //printArray(correctOut, n); // SORT BY DEVICE sort(in, n, out, nBits, true, blockSizes); checkCorrectness(out, correctOut, n); // FREE MEMORIES free(in); free(out); free(correctOut); return EXIT_SUCCESS; }
508da419b82d7edfeb93c90b1d5e1f371a4252be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include <string> #include "loadSaveImage.h" #include <thrust/extrema.h> #include "reference_calc.h" //chroma-LogLuminance Space static float *d_x__; static float *d_y__; static float *d_logY__; //memory for the cdf static unsigned int *d_cdf__; static const int numBins = 1024; size_t numRows__; size_t numCols__; /* Copied from Mike's IPython notebook with some minor modifications * Mainly double precision constants to floats and log10 -> log10f * Also removed Luminance (Y) channel since it is never used eke*/ __global__ void rgb_to_xyY( float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float r = d_r[ image_index_1d ]; float g = d_g[ image_index_1d ]; float b = d_b[ image_index_1d ]; float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f ); float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f ); float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f ); float L = X + Y + Z; float x = X / L; float y = Y / L; float log_Y = log10f( delta + Y ); d_x[ image_index_1d ] = x; d_y[ image_index_1d ] = y; d_log_Y[ image_index_1d ] = log_Y; } } /* Copied from Mike's IPython notebook * Modified just by having threads read the normalization constant directly from device memory instead of copying it back */ __global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n ) { const float normalization_constant = 1.f / d_input_cdf[n - 1]; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( global_index_1d < n ) { unsigned int input_value = d_input_cdf[ 
global_index_1d ]; float output_value = input_value * normalization_constant; d_output_cdf[ global_index_1d ] = output_value; } } /* Copied from Mike's IPython notebook * Modified double constants -> float * Perform tone mapping based upon new * luminance scaling */ __global__ void tonemap( float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y, float max_log_Y, float log_Y_range, int num_bins, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float x = d_x[ image_index_1d ]; float y = d_y[ image_index_1d ]; float log_Y = d_log_Y[ image_index_1d ]; int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) ); float Y_new = d_cdf_norm[ bin_index ]; float X_new = x * ( Y_new / y ); float Z_new = ( 1 - x - y ) * ( Y_new / y ); float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f ); float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f ); float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f ); d_r_new[ image_index_1d ] = r_new; d_g_new[ image_index_1d ] = g_new; d_b_new[ image_index_1d ] = b_new; } } //return types are void since any internal error will be handled by quitting //no point in returning error codes... 
void preProcess(float** d_luminance, unsigned int** d_cdf, size_t *numRows, size_t *numCols, unsigned int *numberOfBins, const std::string &filename) { //make sure the context initializes ok checkCudaErrors(hipFree(0)); float *imgPtr; //we will become responsible for this pointer loadImageHDR(filename, &imgPtr, &numRows__, &numCols__); *numRows = numRows__; *numCols = numCols__; //first thing to do is split incoming BGR float data into separate channels size_t numPixels = numRows__ * numCols__; float *red = new float[numPixels]; float *green = new float[numPixels]; float *blue = new float[numPixels]; //Remeber image is loaded BGR for (size_t i = 0; i < numPixels; ++i) { blue[i] = imgPtr[3 * i + 0]; green[i] = imgPtr[3 * i + 1]; red[i] = imgPtr[3 * i + 2]; } delete[] imgPtr; //being good citizens are releasing resources //allocated in loadImageHDR float *d_red, *d_green, *d_blue; //RGB space size_t channelSize = sizeof(float) * numPixels; checkCudaErrors(hipMalloc(&d_red, channelSize)); checkCudaErrors(hipMalloc(&d_green, channelSize)); checkCudaErrors(hipMalloc(&d_blue, channelSize)); checkCudaErrors(hipMalloc(&d_x__, channelSize)); checkCudaErrors(hipMalloc(&d_y__, channelSize)); checkCudaErrors(hipMalloc(&d_logY__, channelSize)); checkCudaErrors(hipMemcpy(d_red, red, channelSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_green, green, channelSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_blue, blue, channelSize, hipMemcpyHostToDevice)); //convert from RGB space to chrominance/luminance space xyY const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x, (numRows__ + blockSize.y - 1) / blockSize.y, 1); hipLaunchKernelGGL(( rgb_to_xyY), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_green, d_blue, d_x__, d_y__, d_logY__, .0001f, numRows__, numCols__); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); *d_luminance = d_logY__; //allocate memory for the cdf of the histogram *numberOfBins = 
numBins; checkCudaErrors(hipMalloc(&d_cdf__, sizeof(unsigned int) * numBins)); checkCudaErrors(hipMemset(d_cdf__, 0, sizeof(unsigned int) * numBins)); *d_cdf = d_cdf__; checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); delete[] red; delete[] green; delete[] blue; } void postProcess(const std::string& output_file, size_t numRows, size_t numCols, float min_log_Y, float max_log_Y) { const int numPixels = numRows__ * numCols__; const int numThreads = 192; float *d_cdf_normalized; checkCudaErrors(hipMalloc(&d_cdf_normalized, sizeof(float) * numBins)); //first normalize the cdf to a maximum value of 1 //this is how we compress the range of the luminance channel hipLaunchKernelGGL(( normalize_cdf), dim3((numBins + numThreads - 1) / numThreads), dim3(numThreads), 0, 0, d_cdf__, d_cdf_normalized, numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //allocate memory for the output RGB channels float *h_red, *h_green, *h_blue; float *d_red, *d_green, *d_blue; h_red = new float[numPixels]; h_green = new float[numPixels]; h_blue = new float[numPixels]; checkCudaErrors(hipMalloc(&d_red, sizeof(float) * numPixels)); checkCudaErrors(hipMalloc(&d_green, sizeof(float) * numPixels)); checkCudaErrors(hipMalloc(&d_blue, sizeof(float) * numPixels)); float log_Y_range = max_log_Y - min_log_Y; const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y ); //next perform the actual tone-mapping //we map each luminance value to its new value //and then transform back to RGB space hipLaunchKernelGGL(( tonemap), dim3(gridSize), dim3(blockSize), 0, 0, d_x__, d_y__, d_logY__, d_cdf_normalized, d_red, d_green, d_blue, min_log_Y, max_log_Y, log_Y_range, numBins, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipMemcpy(h_red, d_red, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); 
checkCudaErrors(hipMemcpy(h_green, d_green, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_blue, d_blue, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); //recombine the image channels float *imageHDR = new float[numPixels * 3]; for (int i = 0; i < numPixels; ++i) { imageHDR[3 * i + 0] = h_blue[i]; imageHDR[3 * i + 1] = h_green[i]; imageHDR[3 * i + 2] = h_red[i]; } saveImageHDR(imageHDR, numRows, numCols, output_file); delete[] imageHDR; delete[] h_red; delete[] h_green; delete[] h_blue; //cleanup checkCudaErrors(hipFree(d_x__)); checkCudaErrors(hipFree(d_y__)); checkCudaErrors(hipFree(d_logY__)); checkCudaErrors(hipFree(d_cdf__)); checkCudaErrors(hipFree(d_cdf_normalized)); }
508da419b82d7edfeb93c90b1d5e1f371a4252be.cu
#include "utils.h" #include <string> #include "loadSaveImage.h" #include <thrust/extrema.h> #include "reference_calc.h" //chroma-LogLuminance Space static float *d_x__; static float *d_y__; static float *d_logY__; //memory for the cdf static unsigned int *d_cdf__; static const int numBins = 1024; size_t numRows__; size_t numCols__; /* Copied from Mike's IPython notebook with some minor modifications * Mainly double precision constants to floats and log10 -> log10f * Also removed Luminance (Y) channel since it is never used eke*/ __global__ void rgb_to_xyY( float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float r = d_r[ image_index_1d ]; float g = d_g[ image_index_1d ]; float b = d_b[ image_index_1d ]; float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f ); float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f ); float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f ); float L = X + Y + Z; float x = X / L; float y = Y / L; float log_Y = log10f( delta + Y ); d_x[ image_index_1d ] = x; d_y[ image_index_1d ] = y; d_log_Y[ image_index_1d ] = log_Y; } } /* Copied from Mike's IPython notebook * Modified just by having threads read the normalization constant directly from device memory instead of copying it back */ __global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n ) { const float normalization_constant = 1.f / d_input_cdf[n - 1]; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( global_index_1d < n ) { unsigned int input_value = d_input_cdf[ global_index_1d ]; float output_value = input_value * normalization_constant; d_output_cdf[ 
global_index_1d ] = output_value; } } /* Copied from Mike's IPython notebook * Modified double constants -> float * Perform tone mapping based upon new * luminance scaling */ __global__ void tonemap( float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y, float max_log_Y, float log_Y_range, int num_bins, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float x = d_x[ image_index_1d ]; float y = d_y[ image_index_1d ]; float log_Y = d_log_Y[ image_index_1d ]; int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) ); float Y_new = d_cdf_norm[ bin_index ]; float X_new = x * ( Y_new / y ); float Z_new = ( 1 - x - y ) * ( Y_new / y ); float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f ); float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f ); float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f ); d_r_new[ image_index_1d ] = r_new; d_g_new[ image_index_1d ] = g_new; d_b_new[ image_index_1d ] = b_new; } } //return types are void since any internal error will be handled by quitting //no point in returning error codes... 
void preProcess(float** d_luminance, unsigned int** d_cdf, size_t *numRows, size_t *numCols, unsigned int *numberOfBins, const std::string &filename) { //make sure the context initializes ok checkCudaErrors(cudaFree(0)); float *imgPtr; //we will become responsible for this pointer loadImageHDR(filename, &imgPtr, &numRows__, &numCols__); *numRows = numRows__; *numCols = numCols__; //first thing to do is split incoming BGR float data into separate channels size_t numPixels = numRows__ * numCols__; float *red = new float[numPixels]; float *green = new float[numPixels]; float *blue = new float[numPixels]; //Remeber image is loaded BGR for (size_t i = 0; i < numPixels; ++i) { blue[i] = imgPtr[3 * i + 0]; green[i] = imgPtr[3 * i + 1]; red[i] = imgPtr[3 * i + 2]; } delete[] imgPtr; //being good citizens are releasing resources //allocated in loadImageHDR float *d_red, *d_green, *d_blue; //RGB space size_t channelSize = sizeof(float) * numPixels; checkCudaErrors(cudaMalloc(&d_red, channelSize)); checkCudaErrors(cudaMalloc(&d_green, channelSize)); checkCudaErrors(cudaMalloc(&d_blue, channelSize)); checkCudaErrors(cudaMalloc(&d_x__, channelSize)); checkCudaErrors(cudaMalloc(&d_y__, channelSize)); checkCudaErrors(cudaMalloc(&d_logY__, channelSize)); checkCudaErrors(cudaMemcpy(d_red, red, channelSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_green, green, channelSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_blue, blue, channelSize, cudaMemcpyHostToDevice)); //convert from RGB space to chrominance/luminance space xyY const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x, (numRows__ + blockSize.y - 1) / blockSize.y, 1); rgb_to_xyY<<<gridSize, blockSize>>>(d_red, d_green, d_blue, d_x__, d_y__, d_logY__, .0001f, numRows__, numCols__); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); *d_luminance = d_logY__; //allocate memory for the cdf of the histogram *numberOfBins = numBins; 
checkCudaErrors(cudaMalloc(&d_cdf__, sizeof(unsigned int) * numBins)); checkCudaErrors(cudaMemset(d_cdf__, 0, sizeof(unsigned int) * numBins)); *d_cdf = d_cdf__; checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); delete[] red; delete[] green; delete[] blue; } void postProcess(const std::string& output_file, size_t numRows, size_t numCols, float min_log_Y, float max_log_Y) { const int numPixels = numRows__ * numCols__; const int numThreads = 192; float *d_cdf_normalized; checkCudaErrors(cudaMalloc(&d_cdf_normalized, sizeof(float) * numBins)); //first normalize the cdf to a maximum value of 1 //this is how we compress the range of the luminance channel normalize_cdf<<< (numBins + numThreads - 1) / numThreads, numThreads>>>(d_cdf__, d_cdf_normalized, numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //allocate memory for the output RGB channels float *h_red, *h_green, *h_blue; float *d_red, *d_green, *d_blue; h_red = new float[numPixels]; h_green = new float[numPixels]; h_blue = new float[numPixels]; checkCudaErrors(cudaMalloc(&d_red, sizeof(float) * numPixels)); checkCudaErrors(cudaMalloc(&d_green, sizeof(float) * numPixels)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(float) * numPixels)); float log_Y_range = max_log_Y - min_log_Y; const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y ); //next perform the actual tone-mapping //we map each luminance value to its new value //and then transform back to RGB space tonemap<<<gridSize, blockSize>>>(d_x__, d_y__, d_logY__, d_cdf_normalized, d_red, d_green, d_blue, min_log_Y, max_log_Y, log_Y_range, numBins, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(h_red, d_red, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_green, d_green, sizeof(float) * numPixels, 
cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_blue, d_blue, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); //recombine the image channels float *imageHDR = new float[numPixels * 3]; for (int i = 0; i < numPixels; ++i) { imageHDR[3 * i + 0] = h_blue[i]; imageHDR[3 * i + 1] = h_green[i]; imageHDR[3 * i + 2] = h_red[i]; } saveImageHDR(imageHDR, numRows, numCols, output_file); delete[] imageHDR; delete[] h_red; delete[] h_green; delete[] h_blue; //cleanup checkCudaErrors(cudaFree(d_x__)); checkCudaErrors(cudaFree(d_y__)); checkCudaErrors(cudaFree(d_logY__)); checkCudaErrors(cudaFree(d_cdf__)); checkCudaErrors(cudaFree(d_cdf_normalized)); }
7b99fcf83c20ff521b96d002d78ae4a6e317605b.hip
// !!! This is a file automatically generated by hipify!!! /*------------------- BEGIN box_model_cu_integrate.cu BEGIN -------------------*/ /* @file box_model_cu_integrate.cu */ /* @author charlesj */ /* @date 2014-05-23 15:17:05.134548 */ /* @brief Interface to time stepping integrator */ /* */ /* Definitions of interface functions for the Kppa-generated */ /* time stepping integrator. These are the Kppa "entry point" routines. */ /* */ /* This file was generated by Kppa: http://www.paratools.com/Kppa */ /*-----------------------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include "box_model_cu_parameters.h" #include "box_model_sparse.h" #include "box_model_cu_integrate.h" #include "box_model_rosenbrock.h" /*-------------------------------- StageToHost --------------------------------*/ /* Stages concentration data into host-side memory in preparation */ /* for transfer to device memory. Data is reorganized to promote */ /* data access coalescing. */ /* */ /* @param[in] ncells Number of grid cells */ /* @param[out] h_conc Species concentrations in page-locked host memory */ /* @param[in] pitch Data pitch (a.k.a stride) */ /* @param[in] conc Species concentrations */ /*-----------------------------------------------------------------------------*/ void StageToHost(size_t const ncells, double* h_conc, size_t const pitch, double const conc[]) { for(int i=0; i<ncells; ++i) { for(int j=0; j<NSPEC; ++j) { h_conc[j*pitch + i] = *(conc++); } } }/* END StageToHost */ /*----------------------------- TempStageToHost -------------------------------*/ /* Stages temperatures data into host-side memory in preparation */ /* for transfer to device memory. Data is reorganized to promote */ /* data access coalescing. 
*/ /* */ /* @param[in] ncells Number of grid cells */ /* @param[out] h_temp Temperatures in page-locked host memory */ /* @param[in] TEMP Temperatures */ /*-----------------------------------------------------------------------------*/ void TempStageToHost(size_t const ncells, double* h_temp, double const TEMP[]) { for(int i=0; i<ncells; ++i) { h_temp[i] = *(TEMP++); } }/* END TempStageToHost */ /*------------------------------- StageFromHost -------------------------------*/ /* Stages concentration data out of host-side memory after it is */ /* retrieved from the device. Original data order is restored. */ /* */ /* @param[in] ncells Number of grid cells */ /* @param[in] h_conc Species concentrations in page-locked host memory */ /* @param[in] pitch Data pitch (a.k.a stride) */ /* @param[out] conc Species concentrations */ /*-----------------------------------------------------------------------------*/ void StageFromHost(size_t const ncells, double const * h_conc, size_t const pitch, double conc[]) { for(int i=0; i<ncells; ++i) { for(int j=0; j<NSPEC; ++j) { *(conc++) = h_conc[j*pitch + i]; } } }/* END StageFromHost */ /*------------------------------ GetBestDevice -------------------------------*/ /* Scans the host system for CUDA devices of compute capability */ /* 2.0 or better. If multiple devices are found, the "most powerful" */ /* device is selected. 
*/ /* */ /* @param[out] devNumberOut The number of the selected CUDA device */ /* @param[out] propsOut The properties of the selected CUDA device */ /* @return 0 on success, or negative value on error */ /*----------------------------------------------------------------------------*/ int GetBestDevice(int * devNumberOut, hipDeviceProp_t * propsOut) { hipDeviceProp_t bestProps, props; int devCount = 0; int devNumber = 0; /* Get count of cuda devices */ switch(hipGetDeviceCount(&devCount)) { case hipErrorNoDevice: printf("Kppa: No CUDA devices detected.\n"); return -1; case hipErrorInsufficientDriver: printf("Kppa: Could not load CUDA driver.\n"); return -1; case hipSuccess: #ifdef SHOW_CUDA_DEVICE_PROPERTIES printf("Kppa: %d CUDA devices detected.\n", devCount); #endif break; default: printf("Kppa: Unknown return value from hipGetDeviceCount\n"); return -1; } /* Get the first device's properties */ if (hipGetDeviceProperties(&bestProps, 0) != hipSuccess) { printf("Kppa: Failed to get properties of CUDA device 0.\n"); return -1; } /* Find the best device */ for(int i=1; i<devCount; ++i) { if (hipGetDeviceProperties(&props, i) != hipSuccess) { printf("Kppa: Failed to get properties of CUDA device %d.\n", i); continue; } /* Filter unsupported devices */ if(props.major < 2) { printf("Kppa: Ignoring device %d: Compute capability < 2.0 .\n", i); continue; } if(props.warpSize != 32) { printf("Kppa: Ignoring device %d: Warp size != 32.\n", i); continue; } if((props.major > bestProps.major) || (props.major == bestProps.major && props.minor > bestProps.minor) || (props.multiProcessorCount > bestProps.multiProcessorCount) || (props.totalGlobalMem > bestProps.totalGlobalMem) || (props.regsPerBlock > bestProps.regsPerBlock) || (props.sharedMemPerBlock > bestProps.sharedMemPerBlock)) { bestProps = props; devNumber = i; } } #ifdef SHOW_CUDA_DEVICE_PROPERTIES /* Report on the selected device */ printf("Kppa: Selected device %d.\n", devNumber); printf("Kppa: Device %d: Name: %s\n", 
devNumber, bestProps.name); printf("Kppa: Device %d: Version: %d.%d\n", devNumber, bestProps.major, bestProps.minor); printf("Kppa: Device %d: Warp Size: %d\n", devNumber, bestProps.warpSize); printf("Kppa: Device %d: Clock Rate (MHz): %d\n", devNumber, bestProps.clockRate/1000); printf("Kppa: Device %d: Total Multiprocessors: %d\n", devNumber, bestProps.multiProcessorCount); printf("Kppa: Device %d: Total Global Memory (MB): %ld\n", devNumber, bestProps.totalGlobalMem>>20); printf("Kppa: Device %d: Total Const. Memory (KB): %ld\n", devNumber, bestProps.totalConstMem>>10); printf("Kppa: Device %d: Block Shared Memory (KB): %ld\n", devNumber, bestProps.sharedMemPerBlock>>10); printf("Kppa: Device %d: Block Registers: %d\n", devNumber, bestProps.regsPerBlock); printf("Kppa: Device %d: Max Threads per Block: %d\n", devNumber, bestProps.maxThreadsPerBlock); printf("Kppa: Device %d: Max Memory Pitch (KB): %ld\n", devNumber, bestProps.memPitch>>10); printf("Kppa: Device %d: Max Threadblock Dim: (%d, %d, %d)\n", devNumber, bestProps.maxThreadsDim[0], bestProps.maxThreadsDim[1], bestProps.maxThreadsDim[2]); printf("Kppa: Device %d: Max Grid Size: (%d, %d, %d)\n", devNumber, bestProps.maxGridSize[0], bestProps.maxGridSize[1], bestProps.maxGridSize[2]); printf("Kppa: Device %d: Texture Alignment (B): %ld\n", devNumber, bestProps.textureAlignment); printf("Kppa: Device %d: Supports Device Overlap?: %s\n", devNumber, bestProps.deviceOverlap ? 
"yes" : "no"); #endif *devNumberOut = devNumber; *propsOut = bestProps; return 0; } /* END GetBestDevice */ /*------------------------------- GridIntegrate -------------------------------*/ /* Applies the Kppa-generated integrator to the grid */ /* */ /* @param[in] ncells Number of grid cells */ /* @param[in,out] conc Species concentrations */ /* @param[in] tstart Integration start time */ /* @param[in] tend Integration end time */ /* @param[in] abstol Absolute integration tolerances for variable species */ /* @param[in] reltol Relative integration tolerances for variable species */ /* @param[in,out] idata Integer integration in/out parameters */ /* @param[in,out] rdata Real value integration in/out parameters */ /* @param[in] TEMP Temperatures in kelvin */ /*-----------------------------------------------------------------------------*/ int GridIntegrate(size_t const ncells, double conc[], double const tstart, double const tend, double const abstol[NVAR], double const reltol[NVAR], int idata[20], double rdata[20], long long int ISTATS[8], double TEMP[]) { #define ABORT(code, fmt, ...) 
{ \ printf("Kppa: Failure in CUDA integrator: " fmt, ##__VA_ARGS__); \ idata[19] = code; \ return code; \ } static int device = -1; static size_t chunk = 0; static size_t chunk32 = 0; static size_t stagesize = 0; static size_t tempstagesize = 0; /* Return value */ int retval = 0; /* Solver initialization */ if(device == -1) { /* Select the "best" CUDA device */ hipDeviceProp_t props; if(GetBestDevice(&device, &props)) ABORT(-20, "No suitable CUDA device found.\n"); if(hipSetDevice(device) != hipSuccess) ABORT(-20, "Failed to select CUDA device %d.\n", device); /* Estimate overhead in device memory */ size_t overhead = 500*0x100000UL + NVAR*(sizeof(double) + sizeof(int)); /* Estimate cell size in bytes */ size_t cellsize = sizeof(double) * (NSPEC // Concentrations + NREACT // Reaction rates + 11*NVAR // Integrator tolerances, working data and 6 stages + 3*JAC_LU_NZ); // Integrator working data /* Estimate number of cells that will fit in device memory */ chunk = (props.totalGlobalMem - overhead) / cellsize; /* Don't exceed device grid limits */ size_t maxblocks = (props.maxGridSize[0] / JAC_LU_NZ) * 256; if(chunk > maxblocks) chunk = maxblocks; /* Don't exceed total number of cells */ if(chunk > ncells) chunk = ncells; /* Round up to next multiple of 32 */ chunk32 = (chunk + 31) & ~31; stagesize = NSPEC*chunk32*sizeof(double); tempstagesize = chunk32*sizeof(double); } /* Allocate write combined, page-locked host memory */ /* Species concentrations in page-locked host memory */ double* h_conc; /* Temperatures in page-locked host memory */ double* h_temp; if(hipHostMalloc(&h_conc, stagesize, hipHostMallocWriteCombined) != hipSuccess) { /* Fall back to page-locked only */ printf("Kppa: Warning: Can't allocate write combined page-locked host memory.\n"); retval = 1; if(hipHostMalloc(&h_conc, stagesize) != hipSuccess) { /* Fall back to regular malloc */ printf("Kppa: Warning: Can't allocate page-locked host memory.\n"); if(!(h_conc = (double*)malloc(stagesize))) 
ABORT(-20, "Failed to allocate host memory.\n"); } } if(hipHostMalloc(&h_temp, tempstagesize, hipHostMallocWriteCombined) != hipSuccess) { /* Fall back to page-locked only */ printf("Kppa: Warning: Can't allocate write combined page-locked host memory.\n"); retval = 1; if(hipHostMalloc(&h_temp, tempstagesize) != hipSuccess) { /* Fall back to regular malloc */ printf("Kppa: Warning: Can't allocate page-locked host memory.\n"); if(!(h_temp = (double*)malloc(tempstagesize))) ABORT(-20, "Failed to allocate host memory.\n"); } } /* Allocate device memory */ /* Species concentrations in device memory */ double* d_conc; /* Temperatures in device memory */ double* d_temp; if(hipMalloc(&d_conc, stagesize) != hipSuccess) ABORT(-20, "Can't allocate device memory.\n"); if(hipMalloc(&d_temp, tempstagesize) != hipSuccess) ABORT(-20, "Can't allocate device memory.\n"); for(size_t i=0; i<ncells; i+=chunk) { /* Chunk up to the edge of the grid */ size_t remainder = ncells - i; if(remainder < chunk) { chunk = remainder; chunk32 = (remainder + 31) & ~31; } /* Stage concentration data onto device */ StageToHost(chunk, h_conc, chunk32, conc + i*NSPEC); if(hipMemcpy(d_conc, h_conc, stagesize, hipMemcpyHostToDevice) != hipSuccess) ABORT(-20, "Can't copy concentration data to device\n"); /* Stage temperature data onto device */ TempStageToHost(chunk, h_temp, TEMP + i); if(hipMemcpy(d_temp, h_temp, tempstagesize, hipMemcpyHostToDevice) != hipSuccess) ABORT(-20, "Can't copy temperature data to device\n"); /* Point to variable and fixed concentrations */ double * d_var = d_conc; double * d_fix = d_conc + NVAR*chunk32; /* Invoke the integrator on this block of grid cells */ Integrate(chunk, d_var, d_fix, i, tstart, tend, abstol, reltol, idata, rdata, d_temp); /* Retrieve concentration data from device */ if(hipMemcpy(h_conc, d_conc, stagesize, hipMemcpyDeviceToHost) != hipSuccess) ABORT(-20, "Can't copy concentration data to device\n"); StageFromHost(chunk, h_conc, chunk32, conc + i*NSPEC); 
/* Integrator statistics */ // No. of function calls ISTATS[0] += idata[10]; // No. of jacobian calls ISTATS[1] += idata[11]; // No. of steps ISTATS[2] += idata[12]; // No. of accepted steps ISTATS[3] += idata[13]; // No. of rejected steps (except at very beginning) ISTATS[4] += idata[14]; // No. of LU decompositions ISTATS[5] += idata[15]; // No. of forward/backward substitutions ISTATS[6] += idata[16]; // No. of singular matrix decompositions ISTATS[7] += idata[17]; /* Process integrator return code */ if (idata[19] < 0) { printf("Kppa: CELL CHUNK %zd -- INTEGRATION FAILED\n", i); for(int j=0; j<20; ++j) printf("Kppa: CELL CHUNK %zd, idata[%d] = %d\n", i, j, idata[j]); for(int j=0; j<20; ++j) printf("Kppa: CELL CHUNK %zd, rdata[%d] = %g\n", i, j, rdata[j]); if (idata[19] < retval) retval = idata[19]; } else if (idata[19] > 0) { printf("Kppa: CELL CHUNK %zd -- INTEGRATION COMPLETED WITH WARNING\n", i); if (retval >= 0 && idata[19] > retval) retval = idata[19]; } } /* Deallocate memory and return*/ hipFree(d_conc); hipHostFree(h_conc); hipFree(d_temp); hipHostFree(h_temp); return retval; }/* END GridIntegrate */ /*---------------------- END box_model_cu_integrate.h END ---------------------*/
7b99fcf83c20ff521b96d002d78ae4a6e317605b.cu
/*------------------- BEGIN box_model_cu_integrate.cu BEGIN -------------------*/ /* @file box_model_cu_integrate.cu */ /* @author charlesj */ /* @date 2014-05-23 15:17:05.134548 */ /* @brief Interface to time stepping integrator */ /* */ /* Definitions of interface functions for the Kppa-generated */ /* time stepping integrator. These are the Kppa "entry point" routines. */ /* */ /* This file was generated by Kppa: http://www.paratools.com/Kppa */ /*-----------------------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include "box_model_cu_parameters.h" #include "box_model_sparse.h" #include "box_model_cu_integrate.h" #include "box_model_rosenbrock.h" /*-------------------------------- StageToHost --------------------------------*/ /* Stages concentration data into host-side memory in preparation */ /* for transfer to device memory. Data is reorganized to promote */ /* data access coalescing. */ /* */ /* @param[in] ncells Number of grid cells */ /* @param[out] h_conc Species concentrations in page-locked host memory */ /* @param[in] pitch Data pitch (a.k.a stride) */ /* @param[in] conc Species concentrations */ /*-----------------------------------------------------------------------------*/ void StageToHost(size_t const ncells, double* h_conc, size_t const pitch, double const conc[]) { for(int i=0; i<ncells; ++i) { for(int j=0; j<NSPEC; ++j) { h_conc[j*pitch + i] = *(conc++); } } }/* END StageToHost */ /*----------------------------- TempStageToHost -------------------------------*/ /* Stages temperatures data into host-side memory in preparation */ /* for transfer to device memory. Data is reorganized to promote */ /* data access coalescing. 
*/ /* */ /* @param[in] ncells Number of grid cells */ /* @param[out] h_temp Temperatures in page-locked host memory */ /* @param[in] TEMP Temperatures */ /*-----------------------------------------------------------------------------*/ void TempStageToHost(size_t const ncells, double* h_temp, double const TEMP[]) { for(int i=0; i<ncells; ++i) { h_temp[i] = *(TEMP++); } }/* END TempStageToHost */ /*------------------------------- StageFromHost -------------------------------*/ /* Stages concentration data out of host-side memory after it is */ /* retrieved from the device. Original data order is restored. */ /* */ /* @param[in] ncells Number of grid cells */ /* @param[in] h_conc Species concentrations in page-locked host memory */ /* @param[in] pitch Data pitch (a.k.a stride) */ /* @param[out] conc Species concentrations */ /*-----------------------------------------------------------------------------*/ void StageFromHost(size_t const ncells, double const * h_conc, size_t const pitch, double conc[]) { for(int i=0; i<ncells; ++i) { for(int j=0; j<NSPEC; ++j) { *(conc++) = h_conc[j*pitch + i]; } } }/* END StageFromHost */ /*------------------------------ GetBestDevice -------------------------------*/ /* Scans the host system for CUDA devices of compute capability */ /* 2.0 or better. If multiple devices are found, the "most powerful" */ /* device is selected. 
*/ /* */ /* @param[out] devNumberOut The number of the selected CUDA device */ /* @param[out] propsOut The properties of the selected CUDA device */ /* @return 0 on success, or negative value on error */ /*----------------------------------------------------------------------------*/ int GetBestDevice(int * devNumberOut, cudaDeviceProp * propsOut) { cudaDeviceProp bestProps, props; int devCount = 0; int devNumber = 0; /* Get count of cuda devices */ switch(cudaGetDeviceCount(&devCount)) { case cudaErrorNoDevice: printf("Kppa: No CUDA devices detected.\n"); return -1; case cudaErrorInsufficientDriver: printf("Kppa: Could not load CUDA driver.\n"); return -1; case cudaSuccess: #ifdef SHOW_CUDA_DEVICE_PROPERTIES printf("Kppa: %d CUDA devices detected.\n", devCount); #endif break; default: printf("Kppa: Unknown return value from cudaGetDeviceCount\n"); return -1; } /* Get the first device's properties */ if (cudaGetDeviceProperties(&bestProps, 0) != cudaSuccess) { printf("Kppa: Failed to get properties of CUDA device 0.\n"); return -1; } /* Find the best device */ for(int i=1; i<devCount; ++i) { if (cudaGetDeviceProperties(&props, i) != cudaSuccess) { printf("Kppa: Failed to get properties of CUDA device %d.\n", i); continue; } /* Filter unsupported devices */ if(props.major < 2) { printf("Kppa: Ignoring device %d: Compute capability < 2.0 .\n", i); continue; } if(props.warpSize != 32) { printf("Kppa: Ignoring device %d: Warp size != 32.\n", i); continue; } if((props.major > bestProps.major) || (props.major == bestProps.major && props.minor > bestProps.minor) || (props.multiProcessorCount > bestProps.multiProcessorCount) || (props.totalGlobalMem > bestProps.totalGlobalMem) || (props.regsPerBlock > bestProps.regsPerBlock) || (props.sharedMemPerBlock > bestProps.sharedMemPerBlock)) { bestProps = props; devNumber = i; } } #ifdef SHOW_CUDA_DEVICE_PROPERTIES /* Report on the selected device */ printf("Kppa: Selected device %d.\n", devNumber); printf("Kppa: Device %d: Name: 
%s\n", devNumber, bestProps.name); printf("Kppa: Device %d: Version: %d.%d\n", devNumber, bestProps.major, bestProps.minor); printf("Kppa: Device %d: Warp Size: %d\n", devNumber, bestProps.warpSize); printf("Kppa: Device %d: Clock Rate (MHz): %d\n", devNumber, bestProps.clockRate/1000); printf("Kppa: Device %d: Total Multiprocessors: %d\n", devNumber, bestProps.multiProcessorCount); printf("Kppa: Device %d: Total Global Memory (MB): %ld\n", devNumber, bestProps.totalGlobalMem>>20); printf("Kppa: Device %d: Total Const. Memory (KB): %ld\n", devNumber, bestProps.totalConstMem>>10); printf("Kppa: Device %d: Block Shared Memory (KB): %ld\n", devNumber, bestProps.sharedMemPerBlock>>10); printf("Kppa: Device %d: Block Registers: %d\n", devNumber, bestProps.regsPerBlock); printf("Kppa: Device %d: Max Threads per Block: %d\n", devNumber, bestProps.maxThreadsPerBlock); printf("Kppa: Device %d: Max Memory Pitch (KB): %ld\n", devNumber, bestProps.memPitch>>10); printf("Kppa: Device %d: Max Threadblock Dim: (%d, %d, %d)\n", devNumber, bestProps.maxThreadsDim[0], bestProps.maxThreadsDim[1], bestProps.maxThreadsDim[2]); printf("Kppa: Device %d: Max Grid Size: (%d, %d, %d)\n", devNumber, bestProps.maxGridSize[0], bestProps.maxGridSize[1], bestProps.maxGridSize[2]); printf("Kppa: Device %d: Texture Alignment (B): %ld\n", devNumber, bestProps.textureAlignment); printf("Kppa: Device %d: Supports Device Overlap?: %s\n", devNumber, bestProps.deviceOverlap ? 
"yes" : "no"); #endif *devNumberOut = devNumber; *propsOut = bestProps; return 0; } /* END GetBestDevice */ /*------------------------------- GridIntegrate -------------------------------*/ /* Applies the Kppa-generated integrator to the grid */ /* */ /* @param[in] ncells Number of grid cells */ /* @param[in,out] conc Species concentrations */ /* @param[in] tstart Integration start time */ /* @param[in] tend Integration end time */ /* @param[in] abstol Absolute integration tolerances for variable species */ /* @param[in] reltol Relative integration tolerances for variable species */ /* @param[in,out] idata Integer integration in/out parameters */ /* @param[in,out] rdata Real value integration in/out parameters */ /* @param[in] TEMP Temperatures in kelvin */ /*-----------------------------------------------------------------------------*/ int GridIntegrate(size_t const ncells, double conc[], double const tstart, double const tend, double const abstol[NVAR], double const reltol[NVAR], int idata[20], double rdata[20], long long int ISTATS[8], double TEMP[]) { #define ABORT(code, fmt, ...) 
{ \ printf("Kppa: Failure in CUDA integrator: " fmt, ##__VA_ARGS__); \ idata[19] = code; \ return code; \ } static int device = -1; static size_t chunk = 0; static size_t chunk32 = 0; static size_t stagesize = 0; static size_t tempstagesize = 0; /* Return value */ int retval = 0; /* Solver initialization */ if(device == -1) { /* Select the "best" CUDA device */ cudaDeviceProp props; if(GetBestDevice(&device, &props)) ABORT(-20, "No suitable CUDA device found.\n"); if(cudaSetDevice(device) != cudaSuccess) ABORT(-20, "Failed to select CUDA device %d.\n", device); /* Estimate overhead in device memory */ size_t overhead = 500*0x100000UL + NVAR*(sizeof(double) + sizeof(int)); /* Estimate cell size in bytes */ size_t cellsize = sizeof(double) * (NSPEC // Concentrations + NREACT // Reaction rates + 11*NVAR // Integrator tolerances, working data and 6 stages + 3*JAC_LU_NZ); // Integrator working data /* Estimate number of cells that will fit in device memory */ chunk = (props.totalGlobalMem - overhead) / cellsize; /* Don't exceed device grid limits */ size_t maxblocks = (props.maxGridSize[0] / JAC_LU_NZ) * 256; if(chunk > maxblocks) chunk = maxblocks; /* Don't exceed total number of cells */ if(chunk > ncells) chunk = ncells; /* Round up to next multiple of 32 */ chunk32 = (chunk + 31) & ~31; stagesize = NSPEC*chunk32*sizeof(double); tempstagesize = chunk32*sizeof(double); } /* Allocate write combined, page-locked host memory */ /* Species concentrations in page-locked host memory */ double* h_conc; /* Temperatures in page-locked host memory */ double* h_temp; if(cudaHostAlloc(&h_conc, stagesize, cudaHostAllocWriteCombined) != cudaSuccess) { /* Fall back to page-locked only */ printf("Kppa: Warning: Can't allocate write combined page-locked host memory.\n"); retval = 1; if(cudaMallocHost(&h_conc, stagesize) != cudaSuccess) { /* Fall back to regular malloc */ printf("Kppa: Warning: Can't allocate page-locked host memory.\n"); if(!(h_conc = (double*)malloc(stagesize))) 
ABORT(-20, "Failed to allocate host memory.\n"); } } if(cudaHostAlloc(&h_temp, tempstagesize, cudaHostAllocWriteCombined) != cudaSuccess) { /* Fall back to page-locked only */ printf("Kppa: Warning: Can't allocate write combined page-locked host memory.\n"); retval = 1; if(cudaMallocHost(&h_temp, tempstagesize) != cudaSuccess) { /* Fall back to regular malloc */ printf("Kppa: Warning: Can't allocate page-locked host memory.\n"); if(!(h_temp = (double*)malloc(tempstagesize))) ABORT(-20, "Failed to allocate host memory.\n"); } } /* Allocate device memory */ /* Species concentrations in device memory */ double* d_conc; /* Temperatures in device memory */ double* d_temp; if(cudaMalloc(&d_conc, stagesize) != cudaSuccess) ABORT(-20, "Can't allocate device memory.\n"); if(cudaMalloc(&d_temp, tempstagesize) != cudaSuccess) ABORT(-20, "Can't allocate device memory.\n"); for(size_t i=0; i<ncells; i+=chunk) { /* Chunk up to the edge of the grid */ size_t remainder = ncells - i; if(remainder < chunk) { chunk = remainder; chunk32 = (remainder + 31) & ~31; } /* Stage concentration data onto device */ StageToHost(chunk, h_conc, chunk32, conc + i*NSPEC); if(cudaMemcpy(d_conc, h_conc, stagesize, cudaMemcpyHostToDevice) != cudaSuccess) ABORT(-20, "Can't copy concentration data to device\n"); /* Stage temperature data onto device */ TempStageToHost(chunk, h_temp, TEMP + i); if(cudaMemcpy(d_temp, h_temp, tempstagesize, cudaMemcpyHostToDevice) != cudaSuccess) ABORT(-20, "Can't copy temperature data to device\n"); /* Point to variable and fixed concentrations */ double * d_var = d_conc; double * d_fix = d_conc + NVAR*chunk32; /* Invoke the integrator on this block of grid cells */ Integrate(chunk, d_var, d_fix, i, tstart, tend, abstol, reltol, idata, rdata, d_temp); /* Retrieve concentration data from device */ if(cudaMemcpy(h_conc, d_conc, stagesize, cudaMemcpyDeviceToHost) != cudaSuccess) ABORT(-20, "Can't copy concentration data to device\n"); StageFromHost(chunk, h_conc, chunk32, 
conc + i*NSPEC); /* Integrator statistics */ // No. of function calls ISTATS[0] += idata[10]; // No. of jacobian calls ISTATS[1] += idata[11]; // No. of steps ISTATS[2] += idata[12]; // No. of accepted steps ISTATS[3] += idata[13]; // No. of rejected steps (except at very beginning) ISTATS[4] += idata[14]; // No. of LU decompositions ISTATS[5] += idata[15]; // No. of forward/backward substitutions ISTATS[6] += idata[16]; // No. of singular matrix decompositions ISTATS[7] += idata[17]; /* Process integrator return code */ if (idata[19] < 0) { printf("Kppa: CELL CHUNK %zd -- INTEGRATION FAILED\n", i); for(int j=0; j<20; ++j) printf("Kppa: CELL CHUNK %zd, idata[%d] = %d\n", i, j, idata[j]); for(int j=0; j<20; ++j) printf("Kppa: CELL CHUNK %zd, rdata[%d] = %g\n", i, j, rdata[j]); if (idata[19] < retval) retval = idata[19]; } else if (idata[19] > 0) { printf("Kppa: CELL CHUNK %zd -- INTEGRATION COMPLETED WITH WARNING\n", i); if (retval >= 0 && idata[19] > retval) retval = idata[19]; } } /* Deallocate memory and return*/ cudaFree(d_conc); cudaFreeHost(h_conc); cudaFree(d_temp); cudaFreeHost(h_temp); return retval; }/* END GridIntegrate */ /*---------------------- END box_model_cu_integrate.h END ---------------------*/
fd807338a46df130617839814d34fd51e7e7db0d.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_topk.cu * * @brief Simple test driver program for computing Topk. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <utility> #include <iostream> #include <cstdlib> #include <algorithm> #include <fstream> #include <map> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph construction utils #include <gunrock/graphio/market.cuh> // Degree Centrality includes #include <gunrock/app/topk/topk_enactor.cuh> #include <gunrock/app/topk/topk_problem.cuh> // Operator includes #include <gunrock/oprtr/edge_map_forward/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> using namespace gunrock; using namespace gunrock::app; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::topk; /****************************************************************************** * Housekeeping Routines ******************************************************************************/ void Usage() { printf( "test <graph-type> [graph-type-arguments]\n" "Graph type and graph type arguments:\n" " market <matrix-market-file-name>\n" " Reads a Matrix-Market coordinate-formatted graph of\n" " directed/undirected edges from STDIN (or from the\n" " optionally-specified file).\n" " rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n" " Generate R-MAT graph as input\n" " --rmat_scale=<vertex-scale>\n" " --rmat_nodes=<number-nodes>\n" " --rmat_edgefactor=<edge-factor>\n" " --rmat_edges=<number-edges>\n" " --rmat_a=<factor> --rmat_b=<factor> 
--rmat_c=<factor>\n" " --rmat_seed=<seed>\n" " rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n" " Generate Random Geometry Graph as input\n" " --rgg_scale=<vertex-scale>\n" " --rgg_nodes=<number-nodes>\n" " --rgg_thfactor=<threshold-factor>\n" " --rgg_threshold=<threshold>\n" " --rgg_vmultipiler=<vmultipiler>\n" " --rgg_seed=<seed>\n\n" "Optional arguments:\n" "[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n" "[--undirected] Treat the graph as undirected (symmetric).\n" "[--instrumented] Keep kernels statics [Default: Disable].\n" " total_queued, search_depth and barrier duty.\n" " (a relative indicator of load imbalance.)\n" "[--quick] Skip the CPU reference validation process.\n" "[--disable-size-check] Disable frontier queue size check.\n" "[--grid-size=<grid size>] Maximum allowed grid size setting.\n" "[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n" " (graph-edges * <factor>). (Default: 1.0)\n" "[--v] Print verbose per iteration debug info.\n" "[--iteration-num=<num>] Number of runs to perform the test.\n" "[--quiet] No output (unless --json is specified).\n" "[--json] Output JSON-format statistics to STDOUT.\n" "[--jsonfile=<name>] Output JSON-format statistics to file <name>\n" "[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n" " where name is auto-generated.\n" ); } /** * @brief displays the top K results * */ template < typename VertexId, typename Value, typename SizeT > void DisplaySolution( VertexId *h_node_id, Value *h_degrees_i, Value *h_degrees_o, SizeT num_nodes) { fflush(stdout); // at most display the first 100 results if (num_nodes > 100) num_nodes = 100; printf("==> top %d centrality nodes:\n", num_nodes); for (SizeT iter = 0; iter < num_nodes; ++iter) printf("%d %d %d\n", h_node_id[iter], h_degrees_i[iter], h_degrees_o[iter]); } /****************************************************************************** * Degree Centrality Testing Routines 
*****************************************************************************/ /** * @brief A simple CPU-based reference TOPK implementation. * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph we process on */ struct compare_second_only { template <typename T1, typename T2> bool operator()(const std::pair<T1, T2>& p1, const std::pair<T1, T2>& p2) { return p1.second > p2. second; } }; template < typename VertexId, typename Value, typename SizeT > void SimpleReferenceTopK( Csr<VertexId, Value, SizeT> &csr, Csr<VertexId, Value, SizeT> &csc, VertexId *ref_node_id, Value *ref_degrees, SizeT top_nodes) { printf("CPU reference test.\n"); CpuTimer cpu_timer; // malloc degree centrality spaces Value *ref_degrees_original = (Value*)malloc(sizeof(Value) * csr.nodes); Value *ref_degrees_reversed = (Value*)malloc(sizeof(Value) * csc.nodes); // store reference output results std::vector< std::pair<int, int> > results; // calculations for (SizeT node = 0; node < csr.nodes; ++node) { ref_degrees_original[node] = csr.row_offsets[node + 1] - csr.row_offsets[node]; ref_degrees_reversed[node] = csc.row_offsets[node + 1] - csc.row_offsets[node]; } cpu_timer.Start(); // add ingoing degrees and outgoing degrees together for (SizeT node = 0; node < csr.nodes; ++node) { ref_degrees_original[node] = ref_degrees_original[node] + ref_degrees_reversed[node]; results.push_back( std::make_pair (node, ref_degrees_original[node]) ); } // pair sort according to second elements - degree centrality std::stable_sort(results.begin(), results.end(), compare_second_only()); for (SizeT itr = 0; itr < top_nodes; ++itr) { ref_node_id[itr] = results[itr].first; ref_degrees[itr] = results[itr].second; } cpu_timer.Stop(); float elapsed_cpu = cpu_timer.ElapsedMillis(); printf("==> CPU Degree Centrality finished in %lf msec.\n", elapsed_cpu); // clean up if neccessary if (ref_degrees_original) { free(ref_degrees_original); } if (ref_degrees_reversed) { 
free(ref_degrees_reversed); } results.clear(); } /** * @brief Run TopK tests * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam DEBUG * @tparam SIZE_CHECK * * @param[in] info Pointer to info contains parameters and statistics. */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK > void RunTests(Info<VertexId, Value, SizeT> *info) { // define the problem data structure for graph primitive typedef TOPKProblem < VertexId, SizeT, Value > Problem; Csr<VertexId, Value, SizeT> *csr = info->csr_ptr; Csr<VertexId, Value, SizeT> *csc = info->csc_ptr; int max_grid_size = info->info["max_grid_size"].get_int(); int num_gpus = info->info["num_gpus"].get_int(); bool stream_from_host = info->info["stream_from_host"].get_bool(); SizeT top_nodes = info->info["top_nodes"].get_int(); bool quiet_mode = info->info["quiet_mode"].get_bool(); ContextPtr *context = (ContextPtr*)info->context; json_spirit::mArray device_list = info->info["device_list"].get_array(); int* gpu_idx = new int[num_gpus]; for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int(); // INSTRUMENT specifies whether we want to keep such statistical data // Allocate TOPK enactor map TOPKEnactor<Problem, INSTRUMENT, DEBUG, SIZE_CHECK> topk_enactor(gpu_idx); // allocate problem on GPU // create a pointer of the TOPKProblem type Problem *topk_problem = new Problem; // reset top_nodes if input k > total number of nodes if (top_nodes > csr->nodes) top_nodes = csr->nodes; // malloc host memory VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * top_nodes); VertexId *ref_node_id = (VertexId*)malloc(sizeof(VertexId) * top_nodes); Value *h_degrees_i = (Value*)malloc(sizeof(Value) * top_nodes); Value *h_degrees_o = (Value*)malloc(sizeof(Value) * top_nodes); Value *ref_degrees = (Value*)malloc(sizeof(Value) * top_nodes); // copy data from CPU to GPU // initialize data members in DataSlice for graph 
util::GRError(topk_problem->Init( stream_from_host, *csr, *csc, num_gpus), "Problem TOPK Initialization Failed", __FILE__, __LINE__); // perform topk degree centrality calculations GpuTimer gpu_timer; // Record the kernel running time // reset values in DataSlice for graph util::GRError(topk_problem->Reset(topk_enactor.GetFrontierType()), "TOPK Problem Data Reset Failed", __FILE__, __LINE__); gpu_timer.Start(); // launch topk enactor util::GRError(topk_enactor.template Enact<Problem>(*context, topk_problem, top_nodes, max_grid_size), "TOPK Problem Enact Failed", __FILE__, __LINE__); gpu_timer.Stop(); float elapsed_gpu = gpu_timer.ElapsedMillis(); printf("==> GPU TopK Degree Centrality finished in %lf msec.\n", elapsed_gpu); // copy out results back to CPU from GPU using Extract util::GRError(topk_problem->Extract( h_node_id, h_degrees_i, h_degrees_o, top_nodes), "TOPK Problem Data Extraction Failed", __FILE__, __LINE__); // display solution if (!quiet_mode) DisplaySolution( h_node_id, h_degrees_i, h_degrees_o, top_nodes); info->ComputeCommonStats(topk_enactor.enactor_stats.GetPointer(), elapsed_gpu); if (!quiet_mode) info->DisplayStats(); // validation SimpleReferenceTopK( *csr, *csc, ref_node_id, ref_degrees, top_nodes); int error_num = CompareResults(h_node_id, ref_node_id, top_nodes, true); if (error_num > 0) { if (!quiet_mode) printf("INCOREECT! %d error(s) occured. \n", error_num); } if (!quiet_mode) printf("\n"); info->CollectInfo(); // cleanup if neccessary if (topk_problem) { delete topk_problem; } if (h_node_id) { free(h_node_id); } if (h_degrees_i) { free(h_degrees_i); } if (h_degrees_o) { free(h_degrees_o); } hipDeviceSynchronize(); } /** * @brief Test entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam DEBUG * * @param[in] info Pointer to info contains parameters and statistics. 
*/ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG > void RunTests_size_check(Info<VertexId, Value, SizeT> *info) { if (info->info["size_check"].get_bool()) RunTests <VertexId, Value, SizeT, INSTRUMENT, DEBUG, true > (info); else RunTests <VertexId, Value, SizeT, INSTRUMENT, DEBUG, false> (info); } /** * @brief Test entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * * @param[in] info Pointer to info contains parameters and statistics. */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT > void RunTests_debug(Info<VertexId, Value, SizeT> *info) { if (info->info["debug_mode"].get_bool()) RunTests_size_check <VertexId, Value, SizeT, INSTRUMENT, true > (info); else RunTests_size_check <VertexId, Value, SizeT, INSTRUMENT, false> (info); } /** * @brief Test entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] info Pointer to info contains parameters and statistics. */ template < typename VertexId, typename Value, typename SizeT > void RunTests_instrumented(Info<VertexId, Value, SizeT> *info) { if (info->info["instrument"].get_bool()) RunTests_debug <VertexId, Value, SizeT, true > (info); else RunTests_debug <VertexId, Value, SizeT, false> (info); } /****************************************************************************** * Main ******************************************************************************/ int main(int argc, char** argv) { CommandLineArgs args(argc, argv); int graph_args = argc - args.ParsedArgc() - 1; if ((argc < 2) || (args.CheckCmdLineFlag("help"))) { Usage(); return 1; } typedef int VertexId; typedef int Value; typedef int SizeT; Csr<VertexId, Value, SizeT> csr(false); Csr<VertexId, Value, SizeT> csc(false); Info<VertexId, Value, SizeT> *info = new Info<VertexId, Value, SizeT>; info->info["undirected"] = args.CheckCmdLineFlag("undirected"); info->Init("TOPK", args, csr, csc); RunTests_instrumented<VertexId, Value, 
SizeT>(info); return 0; } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
fd807338a46df130617839814d34fd51e7e7db0d.cu
// ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_topk.cu * * @brief Simple test driver program for computing Topk. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <utility> #include <iostream> #include <cstdlib> #include <algorithm> #include <fstream> #include <map> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph construction utils #include <gunrock/graphio/market.cuh> // Degree Centrality includes #include <gunrock/app/topk/topk_enactor.cuh> #include <gunrock/app/topk/topk_problem.cuh> // Operator includes #include <gunrock/oprtr/edge_map_forward/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> using namespace gunrock; using namespace gunrock::app; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::topk; /****************************************************************************** * Housekeeping Routines ******************************************************************************/ void Usage() { printf( "test <graph-type> [graph-type-arguments]\n" "Graph type and graph type arguments:\n" " market <matrix-market-file-name>\n" " Reads a Matrix-Market coordinate-formatted graph of\n" " directed/undirected edges from STDIN (or from the\n" " optionally-specified file).\n" " rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n" " Generate R-MAT graph as input\n" " --rmat_scale=<vertex-scale>\n" " --rmat_nodes=<number-nodes>\n" " --rmat_edgefactor=<edge-factor>\n" " --rmat_edges=<number-edges>\n" " --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n" " --rmat_seed=<seed>\n" " rgg (default: rgg_scale 
= 10, rgg_thfactor = 0.55)\n" " Generate Random Geometry Graph as input\n" " --rgg_scale=<vertex-scale>\n" " --rgg_nodes=<number-nodes>\n" " --rgg_thfactor=<threshold-factor>\n" " --rgg_threshold=<threshold>\n" " --rgg_vmultipiler=<vmultipiler>\n" " --rgg_seed=<seed>\n\n" "Optional arguments:\n" "[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n" "[--undirected] Treat the graph as undirected (symmetric).\n" "[--instrumented] Keep kernels statics [Default: Disable].\n" " total_queued, search_depth and barrier duty.\n" " (a relative indicator of load imbalance.)\n" "[--quick] Skip the CPU reference validation process.\n" "[--disable-size-check] Disable frontier queue size check.\n" "[--grid-size=<grid size>] Maximum allowed grid size setting.\n" "[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n" " (graph-edges * <factor>). (Default: 1.0)\n" "[--v] Print verbose per iteration debug info.\n" "[--iteration-num=<num>] Number of runs to perform the test.\n" "[--quiet] No output (unless --json is specified).\n" "[--json] Output JSON-format statistics to STDOUT.\n" "[--jsonfile=<name>] Output JSON-format statistics to file <name>\n" "[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n" " where name is auto-generated.\n" ); } /** * @brief displays the top K results * */ template < typename VertexId, typename Value, typename SizeT > void DisplaySolution( VertexId *h_node_id, Value *h_degrees_i, Value *h_degrees_o, SizeT num_nodes) { fflush(stdout); // at most display the first 100 results if (num_nodes > 100) num_nodes = 100; printf("==> top %d centrality nodes:\n", num_nodes); for (SizeT iter = 0; iter < num_nodes; ++iter) printf("%d %d %d\n", h_node_id[iter], h_degrees_i[iter], h_degrees_o[iter]); } /****************************************************************************** * Degree Centrality Testing Routines *****************************************************************************/ /** * @brief A simple CPU-based 
reference TOPK implementation. * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph we process on */ struct compare_second_only { template <typename T1, typename T2> bool operator()(const std::pair<T1, T2>& p1, const std::pair<T1, T2>& p2) { return p1.second > p2. second; } }; template < typename VertexId, typename Value, typename SizeT > void SimpleReferenceTopK( Csr<VertexId, Value, SizeT> &csr, Csr<VertexId, Value, SizeT> &csc, VertexId *ref_node_id, Value *ref_degrees, SizeT top_nodes) { printf("CPU reference test.\n"); CpuTimer cpu_timer; // malloc degree centrality spaces Value *ref_degrees_original = (Value*)malloc(sizeof(Value) * csr.nodes); Value *ref_degrees_reversed = (Value*)malloc(sizeof(Value) * csc.nodes); // store reference output results std::vector< std::pair<int, int> > results; // calculations for (SizeT node = 0; node < csr.nodes; ++node) { ref_degrees_original[node] = csr.row_offsets[node + 1] - csr.row_offsets[node]; ref_degrees_reversed[node] = csc.row_offsets[node + 1] - csc.row_offsets[node]; } cpu_timer.Start(); // add ingoing degrees and outgoing degrees together for (SizeT node = 0; node < csr.nodes; ++node) { ref_degrees_original[node] = ref_degrees_original[node] + ref_degrees_reversed[node]; results.push_back( std::make_pair (node, ref_degrees_original[node]) ); } // pair sort according to second elements - degree centrality std::stable_sort(results.begin(), results.end(), compare_second_only()); for (SizeT itr = 0; itr < top_nodes; ++itr) { ref_node_id[itr] = results[itr].first; ref_degrees[itr] = results[itr].second; } cpu_timer.Stop(); float elapsed_cpu = cpu_timer.ElapsedMillis(); printf("==> CPU Degree Centrality finished in %lf msec.\n", elapsed_cpu); // clean up if neccessary if (ref_degrees_original) { free(ref_degrees_original); } if (ref_degrees_reversed) { free(ref_degrees_reversed); } results.clear(); } /** * @brief Run TopK tests * * @tparam VertexId * @tparam Value * 
@tparam SizeT * @tparam INSTRUMENT * @tparam DEBUG * @tparam SIZE_CHECK * * @param[in] info Pointer to info contains parameters and statistics. */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK > void RunTests(Info<VertexId, Value, SizeT> *info) { // define the problem data structure for graph primitive typedef TOPKProblem < VertexId, SizeT, Value > Problem; Csr<VertexId, Value, SizeT> *csr = info->csr_ptr; Csr<VertexId, Value, SizeT> *csc = info->csc_ptr; int max_grid_size = info->info["max_grid_size"].get_int(); int num_gpus = info->info["num_gpus"].get_int(); bool stream_from_host = info->info["stream_from_host"].get_bool(); SizeT top_nodes = info->info["top_nodes"].get_int(); bool quiet_mode = info->info["quiet_mode"].get_bool(); ContextPtr *context = (ContextPtr*)info->context; json_spirit::mArray device_list = info->info["device_list"].get_array(); int* gpu_idx = new int[num_gpus]; for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int(); // INSTRUMENT specifies whether we want to keep such statistical data // Allocate TOPK enactor map TOPKEnactor<Problem, INSTRUMENT, DEBUG, SIZE_CHECK> topk_enactor(gpu_idx); // allocate problem on GPU // create a pointer of the TOPKProblem type Problem *topk_problem = new Problem; // reset top_nodes if input k > total number of nodes if (top_nodes > csr->nodes) top_nodes = csr->nodes; // malloc host memory VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * top_nodes); VertexId *ref_node_id = (VertexId*)malloc(sizeof(VertexId) * top_nodes); Value *h_degrees_i = (Value*)malloc(sizeof(Value) * top_nodes); Value *h_degrees_o = (Value*)malloc(sizeof(Value) * top_nodes); Value *ref_degrees = (Value*)malloc(sizeof(Value) * top_nodes); // copy data from CPU to GPU // initialize data members in DataSlice for graph util::GRError(topk_problem->Init( stream_from_host, *csr, *csc, num_gpus), "Problem TOPK Initialization Failed", __FILE__, __LINE__); // 
perform topk degree centrality calculations GpuTimer gpu_timer; // Record the kernel running time // reset values in DataSlice for graph util::GRError(topk_problem->Reset(topk_enactor.GetFrontierType()), "TOPK Problem Data Reset Failed", __FILE__, __LINE__); gpu_timer.Start(); // launch topk enactor util::GRError(topk_enactor.template Enact<Problem>(*context, topk_problem, top_nodes, max_grid_size), "TOPK Problem Enact Failed", __FILE__, __LINE__); gpu_timer.Stop(); float elapsed_gpu = gpu_timer.ElapsedMillis(); printf("==> GPU TopK Degree Centrality finished in %lf msec.\n", elapsed_gpu); // copy out results back to CPU from GPU using Extract util::GRError(topk_problem->Extract( h_node_id, h_degrees_i, h_degrees_o, top_nodes), "TOPK Problem Data Extraction Failed", __FILE__, __LINE__); // display solution if (!quiet_mode) DisplaySolution( h_node_id, h_degrees_i, h_degrees_o, top_nodes); info->ComputeCommonStats(topk_enactor.enactor_stats.GetPointer(), elapsed_gpu); if (!quiet_mode) info->DisplayStats(); // validation SimpleReferenceTopK( *csr, *csc, ref_node_id, ref_degrees, top_nodes); int error_num = CompareResults(h_node_id, ref_node_id, top_nodes, true); if (error_num > 0) { if (!quiet_mode) printf("INCOREECT! %d error(s) occured. \n", error_num); } if (!quiet_mode) printf("\n"); info->CollectInfo(); // cleanup if neccessary if (topk_problem) { delete topk_problem; } if (h_node_id) { free(h_node_id); } if (h_degrees_i) { free(h_degrees_i); } if (h_degrees_o) { free(h_degrees_o); } cudaDeviceSynchronize(); } /** * @brief Test entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam DEBUG * * @param[in] info Pointer to info contains parameters and statistics. 
*/ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG > void RunTests_size_check(Info<VertexId, Value, SizeT> *info) { if (info->info["size_check"].get_bool()) RunTests <VertexId, Value, SizeT, INSTRUMENT, DEBUG, true > (info); else RunTests <VertexId, Value, SizeT, INSTRUMENT, DEBUG, false> (info); } /** * @brief Test entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * * @param[in] info Pointer to info contains parameters and statistics. */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT > void RunTests_debug(Info<VertexId, Value, SizeT> *info) { if (info->info["debug_mode"].get_bool()) RunTests_size_check <VertexId, Value, SizeT, INSTRUMENT, true > (info); else RunTests_size_check <VertexId, Value, SizeT, INSTRUMENT, false> (info); } /** * @brief Test entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] info Pointer to info contains parameters and statistics. */ template < typename VertexId, typename Value, typename SizeT > void RunTests_instrumented(Info<VertexId, Value, SizeT> *info) { if (info->info["instrument"].get_bool()) RunTests_debug <VertexId, Value, SizeT, true > (info); else RunTests_debug <VertexId, Value, SizeT, false> (info); } /****************************************************************************** * Main ******************************************************************************/ int main(int argc, char** argv) { CommandLineArgs args(argc, argv); int graph_args = argc - args.ParsedArgc() - 1; if ((argc < 2) || (args.CheckCmdLineFlag("help"))) { Usage(); return 1; } typedef int VertexId; typedef int Value; typedef int SizeT; Csr<VertexId, Value, SizeT> csr(false); Csr<VertexId, Value, SizeT> csc(false); Info<VertexId, Value, SizeT> *info = new Info<VertexId, Value, SizeT>; info->info["undirected"] = args.CheckCmdLineFlag("undirected"); info->Init("TOPK", args, csr, csc); RunTests_instrumented<VertexId, Value, 
SizeT>(info); return 0; } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
2b16d21878e51a6142367281ee34383db90afe79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #include <helper_cuda.h> #include "alloc.h" extern "C" { #include <omp.h> __global__ void kernel_gpu2(int m, int n, int k, double *A, double *B, double *C){ // 2D thread indices defining row and col of element int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; int l; if (i<m && j<n){ C[i*n + j] = 0; for(l=0;l<k;l++){ C[i*n+j] += A[i*k+l]*B[l*n+j]; } } } void matmult_gpu2(int m, int n, int k, double* A, double* B, double* C){ /*Declaring matrices (as arrays) matrix A: m, k matrix B: k, n matrix C: m, n */ //Number of entries or matrix size int A_no = m*k*sizeof(double); int B_no = k*n*sizeof(double); int C_no = m*n*sizeof(double); int i;int j; double* d_A; double* d_B; double* d_C; //Allocation of memory for matrices alloc(&d_A, &d_B, &d_C, A_no, B_no, C_no); //Initialize matrix entries to one //init(h_A, A_no, h_B, B_no, h_C, C_no); //Transfer matrix to device transferToDevice(A, B, C, d_A, d_B, d_C, A_no, B_no, C_no); // Kernel launch int K = 16; //Size of the block dim3 dimgrid(ceil((double) n/K), ceil((double) m/K)); dim3 dimblock(K,K); double time = omp_get_wtime(); hipLaunchKernelGGL(( kernel_gpu2), dim3(dimgrid), dim3(dimblock), 0, 0, m,n,k,d_A,d_B,d_C); hipDeviceSynchronize(); double elapsed = omp_get_wtime()-time; printf("%5.10f\n", elapsed); //Transfer results from device to host transferToHost(A, B, C, d_A, d_B, d_C, A_no, B_no, C_no); //Device Synchronization (and Cuda Error Check) //checkCudaErrors(hipDeviceSynchronize()); /*for (i=0;i<m;i++){ for (j=0;j<n;j++){ printf("%f ", C[i*n + j]); }printf("\n");}printf("\n");*/ //Freeing allocated memory freeall(d_A, d_B, d_C); } }
2b16d21878e51a6142367281ee34383db90afe79.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <helper_cuda.h> #include "alloc.h" extern "C" { #include <omp.h> __global__ void kernel_gpu2(int m, int n, int k, double *A, double *B, double *C){ // 2D thread indices defining row and col of element int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; int l; if (i<m && j<n){ C[i*n + j] = 0; for(l=0;l<k;l++){ C[i*n+j] += A[i*k+l]*B[l*n+j]; } } } void matmult_gpu2(int m, int n, int k, double* A, double* B, double* C){ /*Declaring matrices (as arrays) matrix A: m, k matrix B: k, n matrix C: m, n */ //Number of entries or matrix size int A_no = m*k*sizeof(double); int B_no = k*n*sizeof(double); int C_no = m*n*sizeof(double); int i;int j; double* d_A; double* d_B; double* d_C; //Allocation of memory for matrices alloc(&d_A, &d_B, &d_C, A_no, B_no, C_no); //Initialize matrix entries to one //init(h_A, A_no, h_B, B_no, h_C, C_no); //Transfer matrix to device transferToDevice(A, B, C, d_A, d_B, d_C, A_no, B_no, C_no); // Kernel launch int K = 16; //Size of the block dim3 dimgrid(ceil((double) n/K), ceil((double) m/K)); dim3 dimblock(K,K); double time = omp_get_wtime(); kernel_gpu2<<<dimgrid, dimblock>>>(m,n,k,d_A,d_B,d_C); cudaDeviceSynchronize(); double elapsed = omp_get_wtime()-time; printf("%5.10f\n", elapsed); //Transfer results from device to host transferToHost(A, B, C, d_A, d_B, d_C, A_no, B_no, C_no); //Device Synchronization (and Cuda Error Check) //checkCudaErrors(cudaDeviceSynchronize()); /*for (i=0;i<m;i++){ for (j=0;j<n;j++){ printf("%f ", C[i*n + j]); }printf("\n");}printf("\n");*/ //Freeing allocated memory freeall(d_A, d_B, d_C); } }
b6f012915056219da4c00794278687e1790bcea7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright(C) 2010 by // Tsuyoshi Hamada <hamada@progrape.jp> // Keigo Nitadori <nitadori@margaux.astron.s.u-tokyo.ac.jp> // Rio Yokota <rio.yokota@bristol.ac.uk> #include "vforce.h" namespace libcunbody{ using namespace std; __device__ float4 dev_apot(float4 xi, float4 xj, float4 apot) { float dx = xj.x - xi.x; float dy = xj.y - xi.y; float dz = xj.z - xi.z; float eps2 = xi.w; float mj = xj.w; float r2 = ((eps2 + dx*dx) + dy*dy) + dz*dz; float r1i = rsqrt(r2); float r2i = r1i*r1i; float mr1i = mj * r1i; float mr3i = mr1i * r2i; apot.x += dx * mr3i; apot.y += dy * mr3i; apot.z += dz * mr3i; apot.w -= mr1i; return (apot); } __global__ void cunbody_kernel_tree_015( float4 *xilist, float4 *xjlist, float4 *apotlist, int2 *off) { const int NJBLOCK = 128; int gid = threadIdx.x + blockDim.x * blockIdx.x; float4 xi = xilist[gid]; float4 apot = make_float4(0.0f, 0.0f, 0.0f, 0.0f); int jstart = off[gid].x; int jend = off[gid].y; for(int jbase=jstart; jbase<jend; jbase+=NJBLOCK){ #pragma unroll 128 for(int j=0; j<NJBLOCK; j++){ float4 xj = xjlist[jbase + j]; apot = dev_apot (xi, xj, apot); } } apotlist[gid] = apot; } }; // namespace libcunbody __END__
b6f012915056219da4c00794278687e1790bcea7.cu
// Copyright(C) 2010 by // Tsuyoshi Hamada <hamada@progrape.jp> // Keigo Nitadori <nitadori@margaux.astron.s.u-tokyo.ac.jp> // Rio Yokota <rio.yokota@bristol.ac.uk> #include "vforce.h" namespace libcunbody{ using namespace std; __device__ float4 dev_apot(float4 xi, float4 xj, float4 apot) { float dx = xj.x - xi.x; float dy = xj.y - xi.y; float dz = xj.z - xi.z; float eps2 = xi.w; float mj = xj.w; float r2 = ((eps2 + dx*dx) + dy*dy) + dz*dz; float r1i = rsqrt(r2); float r2i = r1i*r1i; float mr1i = mj * r1i; float mr3i = mr1i * r2i; apot.x += dx * mr3i; apot.y += dy * mr3i; apot.z += dz * mr3i; apot.w -= mr1i; return (apot); } __global__ void cunbody_kernel_tree_015( float4 *xilist, float4 *xjlist, float4 *apotlist, int2 *off) { const int NJBLOCK = 128; int gid = threadIdx.x + blockDim.x * blockIdx.x; float4 xi = xilist[gid]; float4 apot = make_float4(0.0f, 0.0f, 0.0f, 0.0f); int jstart = off[gid].x; int jend = off[gid].y; for(int jbase=jstart; jbase<jend; jbase+=NJBLOCK){ #pragma unroll 128 for(int j=0; j<NJBLOCK; j++){ float4 xj = xjlist[jbase + j]; apot = dev_apot (xi, xj, apot); } } apotlist[gid] = apot; } }; // namespace libcunbody __END__
01971dc13dc6401483be7694bb92c6c8dca13be0.hip
// !!! This is a file automatically generated by hipify!!! #ifndef __MEMORY_CU__ #define __MEMORY_CU__ #include <Windows.h> #include <iostream> #include <stdio.h> #include <mmsystem.h> // timeGetTime() #pragma comment( lib, "winmm.lib" ) #include "../libgqd/inc/cuda_header.h" hipError_t MemoryBandwidthCheck(){ int elem_size = 4; int tortal_size = (512 * 1024 * 1024); // host memory set up int *p_hmem = (int*)malloc(tortal_size); for (int i = 0; i < tortal_size / sizeof(int); ++i) { *p_hmem++ = i; } // device memory set up hipError_t cudaStatus; size_t avai, total; cudaStatus = hipMemGetInfo(&avai, &total); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemGetInfo failed!"); goto cuError; } if (avai + (1024*1024) < tortal_size ) { fprintf(stderr, "avairable memory not enough!"); goto cuError; } int *p_dmem; cudaStatus = hipMalloc((void**)&p_dmem, tortal_size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto cuError; } printf("QueryPerformanceCounter()\n"); LARGE_INTEGER liFreq; BOOL QPCsupported = QueryPerformanceFrequency(&liFreq); if (!QPCsupported) { printf("QueryPerformanceCounter not supported.\n"); exit(0); } printf("Freq = %9.4ld[counts/sec]\n", liFreq.QuadPart); LARGE_INTEGER cuStart, cuEnd; QueryPerformanceCounter(&cuStart); cudaStatus = hipMemcpy(p_dmem, p_hmem, elem_size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto cuError; } QueryPerformanceCounter(&cuEnd); printf("%9.4lf[ms]\n", 1000.0 * (double)(cuEnd.QuadPart - cuStart.QuadPart) / liFreq.QuadPart); free(p_hmem); hipFree(p_dmem); return hipSuccess; cuError: free(p_hmem); hipFree(p_dmem); return hipError_t::hipErrorInvalidValue; } #endif
01971dc13dc6401483be7694bb92c6c8dca13be0.cu
#ifndef __MEMORY_CU__ #define __MEMORY_CU__ #include <Windows.h> #include <iostream> #include <stdio.h> #include <mmsystem.h> // timeGetTime() #pragma comment( lib, "winmm.lib" ) #include "../libgqd/inc/cuda_header.h" cudaError MemoryBandwidthCheck(){ int elem_size = 4; int tortal_size = (512 * 1024 * 1024); // host memory set up int *p_hmem = (int*)malloc(tortal_size); for (int i = 0; i < tortal_size / sizeof(int); ++i) { *p_hmem++ = i; } // device memory set up cudaError_t cudaStatus; size_t avai, total; cudaStatus = cudaMemGetInfo(&avai, &total); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemGetInfo failed!"); goto cuError; } if (avai + (1024*1024) < tortal_size ) { fprintf(stderr, "avairable memory not enough!"); goto cuError; } int *p_dmem; cudaStatus = cudaMalloc((void**)&p_dmem, tortal_size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto cuError; } printf("QueryPerformanceCounter()\n"); LARGE_INTEGER liFreq; BOOL QPCsupported = QueryPerformanceFrequency(&liFreq); if (!QPCsupported) { printf("QueryPerformanceCounter not supported.\n"); exit(0); } printf("Freq = %9.4ld[counts/sec]\n", liFreq.QuadPart); LARGE_INTEGER cuStart, cuEnd; QueryPerformanceCounter(&cuStart); cudaStatus = cudaMemcpy(p_dmem, p_hmem, elem_size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto cuError; } QueryPerformanceCounter(&cuEnd); printf("%9.4lf[ms]\n", 1000.0 * (double)(cuEnd.QuadPart - cuStart.QuadPart) / liFreq.QuadPart); free(p_hmem); cudaFree(p_dmem); return cudaSuccess; cuError: free(p_hmem); cudaFree(p_dmem); return cudaError::cudaErrorInvalidValue; } #endif
50f40612febe7e78bbea5009114da0958d9d94d2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Replace.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *WHAT = NULL; hipMalloc(&WHAT, XSIZE*YSIZE); float *WHERE = NULL; hipMalloc(&WHERE, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Replace), dim3(gridBlock),dim3(threadBlock), 0, 0, WHAT,WHERE); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Replace), dim3(gridBlock),dim3(threadBlock), 0, 0, WHAT,WHERE); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Replace), dim3(gridBlock),dim3(threadBlock), 0, 0, WHAT,WHERE); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
50f40612febe7e78bbea5009114da0958d9d94d2.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Replace.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *WHAT = NULL; cudaMalloc(&WHAT, XSIZE*YSIZE); float *WHERE = NULL; cudaMalloc(&WHERE, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Replace<<<gridBlock,threadBlock>>>(WHAT,WHERE); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Replace<<<gridBlock,threadBlock>>>(WHAT,WHERE); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Replace<<<gridBlock,threadBlock>>>(WHAT,WHERE); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8992b8bfa54a7073e071fc1d67762e032f1fd46c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sh_handler.h" #define RING_BATCH_SIZE 8 #define DPDK_RING_SIZE (BATCH_SIZE * RING_BATCH_SIZE) #define RTE_ETH_CRC_LEN 5 #define TOTAL_PKT_SIZE (PKT_SIZE + RTE_ETH_CRC_LEN) #define ONELINE 6 #define GPU_TIME 1 #define MANI 0 unsigned char * pinned_pkt_buf; unsigned char * tmp; static int idx; uint64_t start; uint64_t end; static uint64_t recv_total; __global__ void print_gpu(unsigned char* d_pkt_buf) { int i; START_RED printf("[GPU]:\n"); for(i = 0; i < TOTAL_PKT_SIZE; i++) { if(i != 0 && i % ONELINE ==0) printf("\n"); printf("%02x ", d_pkt_buf[i]); } printf("\n"); END } __global__ void mani_pkt_gpu(unsigned char * d_pkt_buf, unsigned char * tmp, uint64_t * recv_total, int size) { *recv_total += 1; printf("recv_total = %ld\n", *recv_total); int i; // Swap mac for(i = 0; i < 6; i++){ tmp[i] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 6]; d_pkt_buf[i + 6] = tmp[i]; } // Swap ip for(i = 26; i < 30; i++){ tmp[i-26] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 4]; d_pkt_buf[i + 4] = tmp[i-26]; } // Swap port for(i = 34; i < 36; i++){ tmp[i-34] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 2]; d_pkt_buf[i + 2] = tmp[i-34]; } //Manipulatate data for(i = 36; i < size; i++){ d_pkt_buf[i] = 0; } } extern "C" uint64_t monotonic_time() { struct timespec timespec; clock_gettime(CLOCK_MONOTONIC, &timespec); return timespec.tv_sec * ONE_SEC + timespec.tv_nsec; } extern "C" void copy_to_gpu(unsigned char* buf, int size, float * total_time) { hipEvent_t go, stop; float time = 0; hipEventCreate(&go); hipEventCreate(&stop); hipMemcpy(pinned_pkt_buf + (idx * BATCH_SIZE), buf, sizeof(unsigned char)*size, hipMemcpyHostToDevice); idx++; if(idx == 512) idx = 0; #if MANI hipEventRecord(go, 0); //for(int i = 0; i < BATCH_NUM; i += PKT_SIZE) //{ // hipLaunchKernelGGL(( mani_pkt_gpu), dim3(1),dim3(1), 0, 0, pinned_pkt_buf + (idx * BATCH_SIZE), tmp, &recv_total, size); // } 
//print_gpu<<<1,1>>>(pinned_pkt_buf + (idx * BATCH_SIZE)); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, go, stop); hipEventDestroy(go); hipEventDestroy(stop); *total_time += time; time = 0; if(*total_time >= 10) { //printf("recv_total = %ld\n", recv_total); //printf("total_time = %f\n", *total_time); *total_time = 0; recv_total = 0; } hipMemcpy(buf, pinned_pkt_buf + (idx * BATCH_SIZE), sizeof(unsigned char) * size, hipMemcpyDeviceToHost); #endif #if GPU_TIME for(int i = 0; i < BATCH_NUM; i += PKT_SIZE) { hipLaunchKernelGGL(( mani_pkt_gpu), dim3(1),dim3(1), 0, 0, pinned_pkt_buf + (idx * BATCH_SIZE) + i, tmp, &recv_total, size); } hipLaunchKernelGGL(( print_gpu), dim3(1),dim3(1), 0, 0, pinned_pkt_buf + (idx * BATCH_SIZE)); hipDeviceSynchronize(); end = monotonic_time(); if(end - start >= ONE_SEC) { printf("recv_total = %ld\n", recv_total); recv_total = 0; start = monotonic_time(); } #endif } extern "C" void set_gpu_mem_for_dpdk(void) { size_t pkt_buffer_size = DPDK_RING_SIZE; idx = 0; ASSERTRT(hipMalloc((void**)&pinned_pkt_buf, pkt_buffer_size)); ASSERTRT(hipMemset(pinned_pkt_buf, 1, pkt_buffer_size)); ASSERTRT(hipMalloc((void**)&tmp, sizeof(unsigned char) * 6)); ASSERTRT(hipMemset(tmp, 0, 6)); #if GPU_TIME start = monotonic_time(); recv_total = 0; #endif START_GRN printf("[Done]____GPU mem set for dpdk____\n"); END } __global__ void read_loop(void) { } extern "C" void read_handler(void) { hipLaunchKernelGGL(( read_loop), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); }
8992b8bfa54a7073e071fc1d67762e032f1fd46c.cu
#include "sh_handler.h" #define RING_BATCH_SIZE 8 #define DPDK_RING_SIZE (BATCH_SIZE * RING_BATCH_SIZE) #define RTE_ETH_CRC_LEN 5 #define TOTAL_PKT_SIZE (PKT_SIZE + RTE_ETH_CRC_LEN) #define ONELINE 6 #define GPU_TIME 1 #define MANI 0 unsigned char * pinned_pkt_buf; unsigned char * tmp; static int idx; uint64_t start; uint64_t end; static uint64_t recv_total; __global__ void print_gpu(unsigned char* d_pkt_buf) { int i; START_RED printf("[GPU]:\n"); for(i = 0; i < TOTAL_PKT_SIZE; i++) { if(i != 0 && i % ONELINE ==0) printf("\n"); printf("%02x ", d_pkt_buf[i]); } printf("\n"); END } __global__ void mani_pkt_gpu(unsigned char * d_pkt_buf, unsigned char * tmp, uint64_t * recv_total, int size) { *recv_total += 1; printf("recv_total = %ld\n", *recv_total); int i; // Swap mac for(i = 0; i < 6; i++){ tmp[i] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 6]; d_pkt_buf[i + 6] = tmp[i]; } // Swap ip for(i = 26; i < 30; i++){ tmp[i-26] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 4]; d_pkt_buf[i + 4] = tmp[i-26]; } // Swap port for(i = 34; i < 36; i++){ tmp[i-34] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 2]; d_pkt_buf[i + 2] = tmp[i-34]; } //Manipulatate data for(i = 36; i < size; i++){ d_pkt_buf[i] = 0; } } extern "C" uint64_t monotonic_time() { struct timespec timespec; clock_gettime(CLOCK_MONOTONIC, &timespec); return timespec.tv_sec * ONE_SEC + timespec.tv_nsec; } extern "C" void copy_to_gpu(unsigned char* buf, int size, float * total_time) { cudaEvent_t go, stop; float time = 0; cudaEventCreate(&go); cudaEventCreate(&stop); cudaMemcpy(pinned_pkt_buf + (idx * BATCH_SIZE), buf, sizeof(unsigned char)*size, cudaMemcpyHostToDevice); idx++; if(idx == 512) idx = 0; #if MANI cudaEventRecord(go, 0); //for(int i = 0; i < BATCH_NUM; i += PKT_SIZE) //{ // mani_pkt_gpu<<<1,1>>>(pinned_pkt_buf + (idx * BATCH_SIZE), tmp, &recv_total, size); // } //print_gpu<<<1,1>>>(pinned_pkt_buf + (idx * BATCH_SIZE)); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); 
cudaEventElapsedTime(&time, go, stop); cudaEventDestroy(go); cudaEventDestroy(stop); *total_time += time; time = 0; if(*total_time >= 10) { //printf("recv_total = %ld\n", recv_total); //printf("total_time = %f\n", *total_time); *total_time = 0; recv_total = 0; } cudaMemcpy(buf, pinned_pkt_buf + (idx * BATCH_SIZE), sizeof(unsigned char) * size, cudaMemcpyDeviceToHost); #endif #if GPU_TIME for(int i = 0; i < BATCH_NUM; i += PKT_SIZE) { mani_pkt_gpu<<<1,1>>>(pinned_pkt_buf + (idx * BATCH_SIZE) + i, tmp, &recv_total, size); } print_gpu<<<1,1>>>(pinned_pkt_buf + (idx * BATCH_SIZE)); cudaDeviceSynchronize(); end = monotonic_time(); if(end - start >= ONE_SEC) { printf("recv_total = %ld\n", recv_total); recv_total = 0; start = monotonic_time(); } #endif } extern "C" void set_gpu_mem_for_dpdk(void) { size_t pkt_buffer_size = DPDK_RING_SIZE; idx = 0; ASSERTRT(cudaMalloc((void**)&pinned_pkt_buf, pkt_buffer_size)); ASSERTRT(cudaMemset(pinned_pkt_buf, 1, pkt_buffer_size)); ASSERTRT(cudaMalloc((void**)&tmp, sizeof(unsigned char) * 6)); ASSERTRT(cudaMemset(tmp, 0, 6)); #if GPU_TIME start = monotonic_time(); recv_total = 0; #endif START_GRN printf("[Done]____GPU mem set for dpdk____\n"); END } __global__ void read_loop(void) { } extern "C" void read_handler(void) { read_loop<<<1,1>>>(); cudaDeviceSynchronize(); }
99806397b5abbfd7a1cf95aaa42aee38d02dcf1f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudapars.h" #include "paramssteeringtest1.h" ///////////////////////////////////// // standard imports ///////////////////////////////////// #include <stdio.h> #include <math.h> #include "step.h" ///////////////////////////////////// // kernel function (CUDA device) ///////////////////////////////////// #include "gradops_hdm2.cuh" __global__ void hyperdifmomsource2_parallel(struct params *p, real *w, real *wnew, real *wmod, real *dwn1, real *wd, int order, int ordero, real *wtemp, int field, int dim, int ii, int ii0, real dt) { // compute the global index in the vector from // the number of the current block, blockIdx, // the number of threads per block, blockDim, // and the number of the current thread within the block, threadIdx //int i = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.y * blockDim.y + threadIdx.y; int iindex = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int ii1; real fip,fim1,tmpc; int index,k; int ni=p->n[0]; int nj=p->n[1]; //real dt=p->dt; real dy=p->dx[1]; real dx=p->dx[0]; real rdx; //real g=p->g; // dt=1.0; //dt=0.05; //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3; int ip,jp,ipg,jpg; jp=iindex/(ni/(p->npgp[0])); ip=iindex-(jp*(ni/(p->npgp[0]))); rdx=(((p->dx[0])*(dim==0))+(p->dx[1])*(dim==1)); for(ipg=0;ipg<(p->npgp[0]);ipg++) for(jpg=0;jpg<(p->npgp[1]);jpg++) { i=ip*(p->npgp[0])+ipg; j=jp*(p->npgp[1])+jpg; //if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2)) //if(i<((p->n[0])) && j<((p->n[1]))) if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1)) { wtemp[fencode_hdm2(p,i,j,tmp8)]=grad1r_hdm2(wtemp,p,i,j,tmp4,dim); wtemp[fencode_hdm2(p,i,j,tmp7)]=grad1l_hdm2(wtemp,p,i,j,tmp4,dim); } } __syncthreads(); } ///////////////////////////////////// // error checking routine ///////////////////////////////////// void checkErrors_hdm2(char *label) { // we need to synchronise first to catch errors due to // asynchroneous 
operations that would otherwise // potentially go unnoticed hipError_t err; err = hipDeviceSynchronize(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } err = hipGetLastError(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } } int cuhyperdifmomsource2(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero, real **d_wtemp, int field, int dim, int ii, int ii0, real dt) { //printf("calling propagate solution\n"); //dim3 dimBlock(blocksize, blocksize); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimBlock(dimblock, 1); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock; //__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod, // real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd) //init_parallel(struct params *p, real *b, real *u, real *v, real *h) hipLaunchKernelGGL(( hyperdifmomsource2_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,ii,ii0,dt); //prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h); //printf("called prop\n"); hipDeviceSynchronize(); //boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called boundary\n"); //hipDeviceSynchronize(); //update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called update\n"); // hipDeviceSynchronize(); // hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost); //hipMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), 
hipMemcpyDeviceToHost); //hipMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost); //checkErrors("copy data from device"); }
99806397b5abbfd7a1cf95aaa42aee38d02dcf1f.cu
#include "cudapars.h" #include "paramssteeringtest1.h" ///////////////////////////////////// // standard imports ///////////////////////////////////// #include <stdio.h> #include <math.h> #include "step.h" ///////////////////////////////////// // kernel function (CUDA device) ///////////////////////////////////// #include "gradops_hdm2.cuh" __global__ void hyperdifmomsource2_parallel(struct params *p, real *w, real *wnew, real *wmod, real *dwn1, real *wd, int order, int ordero, real *wtemp, int field, int dim, int ii, int ii0, real dt) { // compute the global index in the vector from // the number of the current block, blockIdx, // the number of threads per block, blockDim, // and the number of the current thread within the block, threadIdx //int i = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.y * blockDim.y + threadIdx.y; int iindex = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int ii1; real fip,fim1,tmpc; int index,k; int ni=p->n[0]; int nj=p->n[1]; //real dt=p->dt; real dy=p->dx[1]; real dx=p->dx[0]; real rdx; //real g=p->g; // dt=1.0; //dt=0.05; //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3; int ip,jp,ipg,jpg; jp=iindex/(ni/(p->npgp[0])); ip=iindex-(jp*(ni/(p->npgp[0]))); rdx=(((p->dx[0])*(dim==0))+(p->dx[1])*(dim==1)); for(ipg=0;ipg<(p->npgp[0]);ipg++) for(jpg=0;jpg<(p->npgp[1]);jpg++) { i=ip*(p->npgp[0])+ipg; j=jp*(p->npgp[1])+jpg; //if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2)) //if(i<((p->n[0])) && j<((p->n[1]))) if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1)) { wtemp[fencode_hdm2(p,i,j,tmp8)]=grad1r_hdm2(wtemp,p,i,j,tmp4,dim); wtemp[fencode_hdm2(p,i,j,tmp7)]=grad1l_hdm2(wtemp,p,i,j,tmp4,dim); } } __syncthreads(); } ///////////////////////////////////// // error checking routine ///////////////////////////////////// void checkErrors_hdm2(char *label) { // we need to synchronise first to catch errors due to // asynchroneous operations that would otherwise // potentially go unnoticed cudaError_t err; err = 
cudaThreadSynchronize(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } err = cudaGetLastError(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } } int cuhyperdifmomsource2(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero, real **d_wtemp, int field, int dim, int ii, int ii0, real dt) { //printf("calling propagate solution\n"); //dim3 dimBlock(blocksize, blocksize); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimBlock(dimblock, 1); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock; //__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod, // real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd) //init_parallel(struct params *p, real *b, real *u, real *v, real *h) hyperdifmomsource2_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,ii,ii0,dt); //prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h); //printf("called prop\n"); cudaThreadSynchronize(); //boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called boundary\n"); //cudaThreadSynchronize(); //update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called update\n"); // cudaThreadSynchronize(); // cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost); //cudaMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost); //cudaMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost); 
//checkErrors("copy data from device"); }
4f00cfa03b825153cea851039dde8d00b4c44213.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated d Tue Aug 13 16:45:13 2013 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define BLOCK_SIZEx 32 #define BLOCK_SIZEy 16 // ---------------------------------------- // Does sum reduction of array x, leaving total in x[0]. // Contents of x are destroyed in the process. // With k threads, can reduce array up to 2*k in size. // Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0) // Having n as template parameter allows compiler to evaluate some conditions at compile time. template< int n > __device__ void sum_reduce( /*int n,*/ int i, double* x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. 
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } // end sum_reduce template< int n > __device__ void sum_reduce_2d( /*int n,*/ int i, int c, double x[][BLOCK_SIZEy+1] ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i][c] += x[i+1024][c]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i][c] += x[i+ 512][c]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i][c] += x[i+ 256][c]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i][c] += x[i+ 128][c]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i][c] += x[i+ 64][c]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i][c] += x[i+ 32][c]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. 
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i][c] += x[i+ 16][c]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i][c] += x[i+ 8][c]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i][c] += x[i+ 4][c]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i][c] += x[i+ 2][c]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i][c] += x[i+ 1][c]; } __syncthreads(); } } // end sum_reduce //============================================================================== __global__ void magma_dlarf_kernel( int m, double *v, double *tau, double *c, int ldc, double *xnorm ) { if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) { const int i = threadIdx.x; double *dc = c + blockIdx.x * ldc; __shared__ double sum[ BLOCK_SIZE ]; double lsum; /* w := v' * C */ lsum = MAGMA_D_ZERO; for( int j = i; j < m; j += BLOCK_SIZE ){ if (j==0) lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] ); else lsum += MAGMA_D_MUL( MAGMA_D_CNJG( v[j] ), dc[j] ); } sum[i] = lsum; sum_reduce< BLOCK_SIZE >( i, sum ); /* C := C - v * w */ __syncthreads(); double z__1 = - MAGMA_D_CNJG(*tau) * sum[0]; for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE ) { if (j==0) dc[j] += z__1; else dc[j] += z__1 * v[j]; } __syncthreads(); /* Adjust the rest of the column norms */ if (i==0){ double temp = MAGMA_D_ABS( dc[0] ) / xnorm[blockIdx.x]; temp = (temp + 1.) * (1. 
- temp); xnorm[blockIdx.x] = xnorm[blockIdx.x] * sqrt(temp); } } } //============================================================================== __global__ void magma_dlarf_smkernel( int m, int n, double *v, double *tau, double *c, int ldc, double *xnorm ) { if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) { const int i = threadIdx.x, col= threadIdx.y; for( int k = col; k < n; k+= BLOCK_SIZEy) { double *dc = c + k * ldc; __shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1]; double lsum; /* w := v' * C */ lsum = MAGMA_D_ZERO; for( int j = i; j < m; j += BLOCK_SIZEx ){ if (j==0) lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] ); else lsum += MAGMA_D_MUL( MAGMA_D_CNJG( v[j] ), dc[j] ); } sum[i][col] = lsum; sum_reduce_2d< BLOCK_SIZEx >( i, col, sum ); /* C := C - v * w */ __syncthreads(); double z__1 = - MAGMA_D_CNJG(*tau) * sum[0][col]; for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZEx ) { if (j==0) dc[j] += z__1; else dc[j] += z__1 * v[j]; } __syncthreads(); /* Adjust the rest of the column norms */ if (i==0){ double temp = MAGMA_D_ABS( dc[0] ) / xnorm[k]; temp = (temp + 1.) * (1. - temp); xnorm[k] = xnorm[k] * sqrt(temp); } } } } //============================================================================== /* Apply a real elementary reflector H to a real M-by-N matrix C from the left. H is represented in the form H = I - tau * v * v' where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix. To apply H' (the conjugate transpose of H), supply conjg(tau) instead tau. This routine uses only one SM (block). 
*/ extern "C" void magma_dlarf_sm(int m, int n, double *v, double *tau, double *c, int ldc, double *xnorm) { dim3 blocks( 1 ); dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy ); hipLaunchKernelGGL(( magma_dlarf_smkernel), dim3(blocks), dim3(threads), 0, magma_stream , m, n, v, tau, c, ldc, xnorm); } //============================================================================== /* Apply a real elementary reflector H to a real M-by-N matrix C from the left. H is represented in the form H = I - tau * v * v' where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix. To apply H' (the conjugate transpose of H), supply conjg(tau) instead tau. The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms are adjusted to hold the norms of v(2:m,2:n). This is a difference with the LAPACK's dlarf routine. */ extern "C" magma_int_t magma_dlarf_gpu( magma_int_t m, magma_int_t n, double *v, double *tau, double *c, magma_int_t ldc, double *xnorm) { dim3 blocks( n ); dim3 threads( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_dlarf_kernel), dim3(blocks), dim3(threads), 0, magma_stream , m, v, tau, c, ldc, xnorm); // The computation can be done on 1 SM with the following routine. // magma_dlarf_sm(m, n, v, tau, c, ldc, xnorm); return MAGMA_SUCCESS; } //==============================================================================
4f00cfa03b825153cea851039dde8d00b4c44213.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated d Tue Aug 13 16:45:13 2013 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define BLOCK_SIZEx 32 #define BLOCK_SIZEy 16 // ---------------------------------------- // Does sum reduction of array x, leaving total in x[0]. // Contents of x are destroyed in the process. // With k threads, can reduce array up to 2*k in size. // Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0) // Having n as template parameter allows compiler to evaluate some conditions at compile time. template< int n > __device__ void sum_reduce( /*int n,*/ int i, double* x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. 
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } // end sum_reduce template< int n > __device__ void sum_reduce_2d( /*int n,*/ int i, int c, double x[][BLOCK_SIZEy+1] ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i][c] += x[i+1024][c]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i][c] += x[i+ 512][c]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i][c] += x[i+ 256][c]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i][c] += x[i+ 128][c]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i][c] += x[i+ 64][c]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i][c] += x[i+ 32][c]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. 
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i][c] += x[i+ 16][c]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i][c] += x[i+ 8][c]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i][c] += x[i+ 4][c]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i][c] += x[i+ 2][c]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i][c] += x[i+ 1][c]; } __syncthreads(); } } // end sum_reduce //============================================================================== __global__ void magma_dlarf_kernel( int m, double *v, double *tau, double *c, int ldc, double *xnorm ) { if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) { const int i = threadIdx.x; double *dc = c + blockIdx.x * ldc; __shared__ double sum[ BLOCK_SIZE ]; double lsum; /* w := v' * C */ lsum = MAGMA_D_ZERO; for( int j = i; j < m; j += BLOCK_SIZE ){ if (j==0) lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] ); else lsum += MAGMA_D_MUL( MAGMA_D_CNJG( v[j] ), dc[j] ); } sum[i] = lsum; sum_reduce< BLOCK_SIZE >( i, sum ); /* C := C - v * w */ __syncthreads(); double z__1 = - MAGMA_D_CNJG(*tau) * sum[0]; for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE ) { if (j==0) dc[j] += z__1; else dc[j] += z__1 * v[j]; } __syncthreads(); /* Adjust the rest of the column norms */ if (i==0){ double temp = MAGMA_D_ABS( dc[0] ) / xnorm[blockIdx.x]; temp = (temp + 1.) * (1. 
- temp); xnorm[blockIdx.x] = xnorm[blockIdx.x] * sqrt(temp); } } } //============================================================================== __global__ void magma_dlarf_smkernel( int m, int n, double *v, double *tau, double *c, int ldc, double *xnorm ) { if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) { const int i = threadIdx.x, col= threadIdx.y; for( int k = col; k < n; k+= BLOCK_SIZEy) { double *dc = c + k * ldc; __shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1]; double lsum; /* w := v' * C */ lsum = MAGMA_D_ZERO; for( int j = i; j < m; j += BLOCK_SIZEx ){ if (j==0) lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] ); else lsum += MAGMA_D_MUL( MAGMA_D_CNJG( v[j] ), dc[j] ); } sum[i][col] = lsum; sum_reduce_2d< BLOCK_SIZEx >( i, col, sum ); /* C := C - v * w */ __syncthreads(); double z__1 = - MAGMA_D_CNJG(*tau) * sum[0][col]; for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZEx ) { if (j==0) dc[j] += z__1; else dc[j] += z__1 * v[j]; } __syncthreads(); /* Adjust the rest of the column norms */ if (i==0){ double temp = MAGMA_D_ABS( dc[0] ) / xnorm[k]; temp = (temp + 1.) * (1. - temp); xnorm[k] = xnorm[k] * sqrt(temp); } } } } //============================================================================== /* Apply a real elementary reflector H to a real M-by-N matrix C from the left. H is represented in the form H = I - tau * v * v' where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix. To apply H' (the conjugate transpose of H), supply conjg(tau) instead tau. This routine uses only one SM (block). */ extern "C" void magma_dlarf_sm(int m, int n, double *v, double *tau, double *c, int ldc, double *xnorm) { dim3 blocks( 1 ); dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy ); magma_dlarf_smkernel<<< blocks, threads, 0, magma_stream >>>( m, n, v, tau, c, ldc, xnorm); } //============================================================================== /* Apply a real elementary reflector H to a real M-by-N matrix C from the left. 
H is represented in the form H = I - tau * v * v' where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix. To apply H' (the conjugate transpose of H), supply conjg(tau) instead tau. The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms are adjusted to hold the norms of v(2:m,2:n). This is a difference with the LAPACK's dlarf routine. */ extern "C" magma_int_t magma_dlarf_gpu( magma_int_t m, magma_int_t n, double *v, double *tau, double *c, magma_int_t ldc, double *xnorm) { dim3 blocks( n ); dim3 threads( BLOCK_SIZE ); magma_dlarf_kernel<<< blocks, threads, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm); // The computation can be done on 1 SM with the following routine. // magma_dlarf_sm(m, n, v, tau, c, ldc, xnorm); return MAGMA_SUCCESS; } //==============================================================================
2a7af526da68b68d1ddac08a41862ee138c3c9c4.hip
// !!! This is a file automatically generated by hipify!!! /* * Diana-Andreea Popescu, EPFL & CERN, Switerland. All rights reserved. */ #include <stdio.h> #include <assert.h> #include <math.h> // CUDA runtime #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "Comp.cuh" // Helper functions and utilities to work with CUDA #include <helper_functions.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <thrust/remove.h> #include <omp.h> #include <vector> #include <list> #include <algorithm> #include <numeric> using namespace std; inline void checkCuda(hipError_t result) { if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); exit(EXIT_FAILURE); } } unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } struct is_order_less { __host__ __device__ bool operator() (const int x) { return (x < 0); } }; extern "C" int composeOnGPU(vector<double> input_coeff, vector<list<int> > terms, int inputSize, vector<int*> other_exp, vector<double*> other_coeff, vector<uint> otherSize, int order, int* final_exponents, double* final_coeffs) { double inits = omp_get_wtime(); int nr_terms = terms.size(); int nr_functions = otherSize.size(); hipStream_t streams[nr_terms]; for (int i = 0; i < nr_terms; ++i) hipStreamCreate(&streams[i]); //duplicate the input polynoms for each stream //allocate device memory for input //////////////////////////////////////////// int *e_input; double *c_input; unsigned int mem_size_exp_input = 0; unsigned int mem_size_coeff_input = 0; int all_size = accumulate(otherSize.begin(), otherSize.end(), 0); mem_size_exp_input = NRVARS * all_size * nr_terms * sizeof(int); mem_size_coeff_input = all_size * nr_terms * sizeof(double); checkCuda(hipMalloc((void **) &e_input, mem_size_exp_input)); checkCuda(hipMalloc((void **) &c_input, mem_size_coeff_input)); 
//////////////////////////////////////////// //determine maximum size to use for allocation //allocate memory for multiplication result, aux result, stencil, indices on each stream unsigned int max_size = *(max_element(otherSize.begin(), otherSize.end())); unsigned int result_size = 8 * max_size * max_size; unsigned int mem_size_result_exp = result_size * NRVARS * sizeof(int) * nr_terms; unsigned int mem_size_result_coeff = result_size * sizeof(double) * nr_terms; int *e_result, *e_aux; double *c_result, *c_aux; checkCuda(hipMalloc((void **) &e_result, mem_size_result_exp)); checkCuda(hipMalloc((void **) &c_result, mem_size_result_coeff)); checkCuda(hipMalloc((void **) &e_aux, mem_size_result_exp)); checkCuda(hipMalloc((void **) &c_aux, mem_size_result_coeff)); //determine next power of 2 for stencil for stream compactation unsigned int max_size_pow2 = nextPow2(max_size * max_size); unsigned int stencil_size = 8 * max_size_pow2; unsigned int mem_size_stencil = stencil_size * sizeof(unsigned int) * nr_terms; unsigned int* stencil, *indices; checkCuda(hipMalloc((void **) &stencil, mem_size_stencil)); checkCuda(hipMalloc((void **) &indices, mem_size_stencil)); //alloc memoy for keys unsigned long long *e_keys, *keys; unsigned int mem_size_keys = result_size * sizeof(unsigned long long) * nr_terms; checkCuda(hipMalloc((void **) &e_keys, mem_size_keys)); checkCuda(hipMalloc((void **) &keys, mem_size_keys)); /////////////////////////////////////////////// //copy data to GPU for (int i = 0; i < nr_terms; ++i){ //compute offset int offset_exp = i * all_size * NRVARS; int offset_coeff = i * all_size; for (int j = 0; j < nr_functions; ++j){ //compute offset if (j != 0){ offset_exp += otherSize[j - 1] * NRVARS; offset_coeff += otherSize[j - 1]; } int mem_size_exp_j = otherSize[j] * NRVARS * sizeof(int); int mem_size_coeff_j = otherSize[j] * sizeof(double); checkCuda(hipMemcpyAsync(e_input + offset_exp, other_exp[j], mem_size_exp_j, hipMemcpyHostToDevice, streams[i])); 
checkCuda(hipMemcpyAsync(c_input + offset_coeff, other_coeff[j], mem_size_coeff_j, hipMemcpyHostToDevice, streams[i])); } } //cout << result_size << " " << stencil_size << endl; //////////////////////////////////////////// //compute number of multiplications int iterations = -nr_terms; for (vector<list<int> >:: const_iterator it = terms.begin(); it != terms.end(); ++it) iterations += it->size(); unsigned int index_result_exp, index_result_coeff, index_input_exp, index_input_coeff; vector<pair<int*, double*> > p_result; vector<pair<int*, double*> > p_aux; for (int i = 0; i < nr_terms; i ++){ index_result_exp = i * result_size * NRVARS; index_result_coeff = i * result_size; p_result.push_back(make_pair(e_result + index_result_exp, c_result + index_result_coeff)); } vector<int> single_terms; uint* sizes_aux; checkCuda(hipHostMalloc((void **) &sizes_aux, sizeof(uint) * nr_terms)); for (int i = 0; i < nr_terms; i ++){ index_input_exp = i * all_size * NRVARS; index_input_coeff = i * all_size; int index = terms[i].front(); terms[i].pop_front(); if (terms[i].empty()) single_terms.push_back(i); for (int k = 1; k <= index; k ++){ index_input_exp += otherSize[k - 1] * NRVARS; index_input_coeff += otherSize[k - 1]; } sizes_aux[i] = otherSize[index]; p_aux.push_back(make_pair(e_input + index_input_exp, c_input + index_input_coeff)); } vector<bool> firstmult(nr_terms, true); vector<uint> dimRes(nr_terms, 0); vector<uint> is_zero(nr_terms, 0); double inite = omp_get_wtime(); cout << "init + copy " << 1000 * (inite - inits) << endl; double start = omp_get_wtime(); for (int i = 0; i < single_terms.size(); i ++) get_keys(keys + single_terms[i] * result_size, p_aux[single_terms[i]].first, sizes_aux[single_terms[i]], streams[single_terms[i]]); unsigned int dimResult, dimInput, dimAux; while (iterations > 0){ //launch kernels from each term #pragma omp parallel for private(index_input_exp, index_input_coeff, index_result_exp, index_result_coeff, dimInput, dimAux, dimResult) 
shared(iterations, indices, terms, dimRes, p_aux, p_result, stencil, order, streams, input_coeff) num_threads(32) for (int i = 0; i < nr_terms; i ++){ if (!terms[i].empty()) { int index = terms[i].front(); index_input_exp = i * all_size * NRVARS; index_input_coeff = i * all_size; for (int k = 1; k <= index; k ++){ index_input_exp += otherSize[k - 1] * NRVARS; index_input_coeff += otherSize[k - 1]; } //hipStreamSynchronize(streams[i]); dimInput = otherSize[index]; dimAux = sizes_aux[i]; dimResult = dimInput * dimAux; dimRes[i] = dimResult; if (terms[i].size() == 1){ multiply_truncate_key(p_result[i].first, e_keys + i * result_size, p_aux[i].first, e_input + index_input_exp, p_result[i].second, p_aux[i].second, c_input + index_input_coeff, dimResult, dimAux, dimInput, order, stencil + i * stencil_size, input_coeff[i], streams[i]); } else { multiply_truncate(p_result[i].first, p_aux[i].first, e_input + index_input_exp, p_result[i].second, p_aux[i].second, c_input + index_input_coeff, dimResult, dimAux, dimInput, order, stencil + i * stencil_size, streams[i]); } if (stencil_size <= 4096) scanExclusiveShort(indices + i * stencil_size, stencil + i * stencil_size, stencil_size, streams[i]); else scanExclusiveLarge(indices + i * stencil_size, stencil + i * stencil_size, stencil_size, streams[i]); checkCuda(hipMemcpyAsync(&sizes_aux[i], indices + i * stencil_size + dimResult, sizeof(uint), hipMemcpyDeviceToHost, streams[i])); hipStreamSynchronize(streams[i]); if (firstmult[i]){ firstmult[i] = false; index_result_exp = i * result_size * NRVARS; index_result_coeff = i * result_size; p_aux[i] = make_pair(e_aux + index_result_exp, c_aux + index_result_coeff); } //cout << sizes_aux[i] << endl; //kernel for truncation // uint dimGrid = dimRes[i]/THREADBLOCK_SIZE; //if (dimRes[i] % THREADBLOCK_SIZE != 0) // dimGrid ++; if (terms[i].size() == 1) compact_pol_key(e_keys + i * result_size, p_result[i].second, stencil + i * stencil_size, indices + i * stencil_size, dimResult, 
sizes_aux[i], keys + i * result_size, p_aux[i].second, streams[i]); //stream_compactation_key<NRVARS><<<dimGrid, THREADBLOCK_SIZE, 0, streams[i]>>>(e_keys + i * result_size, p_result[i].second, stencil + i * stencil_size, indices + i * stencil_size, dimResult, sizes_aux[i], keys + i * result_size, p_aux[i].second); else compact_pol(p_result[i].first, p_result[i].second, stencil + i * stencil_size, indices + i * stencil_size, dimResult, sizes_aux[i], p_aux[i].first, p_aux[i].second, streams[i]); //stream_compactation<THREADBLOCK_SIZE, NRVARS><<<dimGrid, THREADBLOCK_SIZE, 0, streams[i]>>>(p_result[i].first, p_result[i].second, stencil + i * stencil_size, indices + i * stencil_size, dimResult, sizes_aux[i], p_aux[i].first, p_aux[i].second); terms[i].pop_front(); #pragma omp critical iterations --; } } } double endt = omp_get_wtime(); cout << "iter " << 1000 * (endt - start) << endl; hipDeviceSynchronize(); double startcopy = omp_get_wtime(); uint size_keys = 0; for (int i = 0; i < nr_terms; i ++) size_keys += sizes_aux[i]; cout << "total " << size_keys << endl; thrust::device_ptr<unsigned long long> e_keys_ptr(e_keys); thrust::device_ptr<unsigned long long> keys_ptr(keys); int index_keys = 0; thrust::device_ptr<double> c_result_ptr(c_result); thrust::device_ptr<double> c_aux_ptr(c_aux); for (int i = 0; i < nr_terms; i ++){ thrust::copy(keys_ptr + i * result_size, keys_ptr + i * result_size + sizes_aux[i], e_keys_ptr + index_keys); thrust::copy(c_aux_ptr + i * result_size, c_aux_ptr + i * result_size + sizes_aux[i], c_result_ptr + index_keys); index_keys += sizes_aux[i]; } double endcopy = omp_get_wtime(); cout << "copy " << 1000 * (endcopy - startcopy) << endl; double startsort = omp_get_wtime(); thrust::sort_by_key(e_keys_ptr, e_keys_ptr + size_keys, c_result_ptr); double endsort = omp_get_wtime(); cout << "sort " << 1000 * (endsort - startsort) << endl; //reduce by key double startreduce = omp_get_wtime(); thrust::pair<thrust::device_ptr<unsigned long long>, 
thrust::device_ptr<double> > end; end = thrust::reduce_by_key(e_keys_ptr, e_keys_ptr + size_keys, c_result_ptr, keys_ptr, c_aux_ptr); size_keys = end.first - keys_ptr; double endreduce = omp_get_wtime(); cout << "reduce " << 1000 * (endreduce - startreduce) << endl; cout << "size " << size_keys << endl; get_exponents(e_result, keys, size_keys); //coeffs are in c_aux_ptr for (int i = 0; i < nr_terms; ++i) hipStreamDestroy(streams[i]); checkCuda(hipMemcpy(final_exponents, e_result, size_keys * sizeof(int) * NRVARS, hipMemcpyDeviceToHost)); checkCuda(hipMemcpy(final_coeffs, c_aux, size_keys * sizeof(double), hipMemcpyDeviceToHost)); //free memory hipFree(e_input); hipFree(c_input); hipFree(e_result); hipFree(c_result); hipFree(e_aux); hipFree(c_aux); hipFree(e_keys); hipFree(keys); hipFree(stencil); hipFree(indices); return size_keys; }
2a7af526da68b68d1ddac08a41862ee138c3c9c4.cu
/* * Diana-Andreea Popescu, EPFL & CERN, Switerland. All rights reserved. */ #include <stdio.h> #include <assert.h> #include <math.h> // CUDA runtime #include <cuda_runtime.h> #include <cuda.h> #include <device_launch_parameters.h> #include "Comp.cuh" // Helper functions and utilities to work with CUDA #include <helper_functions.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <thrust/remove.h> #include <omp.h> #include <vector> #include <list> #include <algorithm> #include <numeric> using namespace std; inline void checkCuda(cudaError_t result) { if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); exit(EXIT_FAILURE); } } unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } struct is_order_less { __host__ __device__ bool operator() (const int x) { return (x < 0); } }; extern "C" int composeOnGPU(vector<double> input_coeff, vector<list<int> > terms, int inputSize, vector<int*> other_exp, vector<double*> other_coeff, vector<uint> otherSize, int order, int* final_exponents, double* final_coeffs) { double inits = omp_get_wtime(); int nr_terms = terms.size(); int nr_functions = otherSize.size(); cudaStream_t streams[nr_terms]; for (int i = 0; i < nr_terms; ++i) cudaStreamCreate(&streams[i]); //duplicate the input polynoms for each stream //allocate device memory for input //////////////////////////////////////////// int *e_input; double *c_input; unsigned int mem_size_exp_input = 0; unsigned int mem_size_coeff_input = 0; int all_size = accumulate(otherSize.begin(), otherSize.end(), 0); mem_size_exp_input = NRVARS * all_size * nr_terms * sizeof(int); mem_size_coeff_input = all_size * nr_terms * sizeof(double); checkCuda(cudaMalloc((void **) &e_input, mem_size_exp_input)); checkCuda(cudaMalloc((void **) &c_input, mem_size_coeff_input)); //////////////////////////////////////////// //determine maximum size 
to use for allocation //allocate memory for multiplication result, aux result, stencil, indices on each stream unsigned int max_size = *(max_element(otherSize.begin(), otherSize.end())); unsigned int result_size = 8 * max_size * max_size; unsigned int mem_size_result_exp = result_size * NRVARS * sizeof(int) * nr_terms; unsigned int mem_size_result_coeff = result_size * sizeof(double) * nr_terms; int *e_result, *e_aux; double *c_result, *c_aux; checkCuda(cudaMalloc((void **) &e_result, mem_size_result_exp)); checkCuda(cudaMalloc((void **) &c_result, mem_size_result_coeff)); checkCuda(cudaMalloc((void **) &e_aux, mem_size_result_exp)); checkCuda(cudaMalloc((void **) &c_aux, mem_size_result_coeff)); //determine next power of 2 for stencil for stream compactation unsigned int max_size_pow2 = nextPow2(max_size * max_size); unsigned int stencil_size = 8 * max_size_pow2; unsigned int mem_size_stencil = stencil_size * sizeof(unsigned int) * nr_terms; unsigned int* stencil, *indices; checkCuda(cudaMalloc((void **) &stencil, mem_size_stencil)); checkCuda(cudaMalloc((void **) &indices, mem_size_stencil)); //alloc memoy for keys unsigned long long *e_keys, *keys; unsigned int mem_size_keys = result_size * sizeof(unsigned long long) * nr_terms; checkCuda(cudaMalloc((void **) &e_keys, mem_size_keys)); checkCuda(cudaMalloc((void **) &keys, mem_size_keys)); /////////////////////////////////////////////// //copy data to GPU for (int i = 0; i < nr_terms; ++i){ //compute offset int offset_exp = i * all_size * NRVARS; int offset_coeff = i * all_size; for (int j = 0; j < nr_functions; ++j){ //compute offset if (j != 0){ offset_exp += otherSize[j - 1] * NRVARS; offset_coeff += otherSize[j - 1]; } int mem_size_exp_j = otherSize[j] * NRVARS * sizeof(int); int mem_size_coeff_j = otherSize[j] * sizeof(double); checkCuda(cudaMemcpyAsync(e_input + offset_exp, other_exp[j], mem_size_exp_j, cudaMemcpyHostToDevice, streams[i])); checkCuda(cudaMemcpyAsync(c_input + offset_coeff, other_coeff[j], 
mem_size_coeff_j, cudaMemcpyHostToDevice, streams[i])); } } //cout << result_size << " " << stencil_size << endl; //////////////////////////////////////////// //compute number of multiplications int iterations = -nr_terms; for (vector<list<int> >:: const_iterator it = terms.begin(); it != terms.end(); ++it) iterations += it->size(); unsigned int index_result_exp, index_result_coeff, index_input_exp, index_input_coeff; vector<pair<int*, double*> > p_result; vector<pair<int*, double*> > p_aux; for (int i = 0; i < nr_terms; i ++){ index_result_exp = i * result_size * NRVARS; index_result_coeff = i * result_size; p_result.push_back(make_pair(e_result + index_result_exp, c_result + index_result_coeff)); } vector<int> single_terms; uint* sizes_aux; checkCuda(cudaMallocHost((void **) &sizes_aux, sizeof(uint) * nr_terms)); for (int i = 0; i < nr_terms; i ++){ index_input_exp = i * all_size * NRVARS; index_input_coeff = i * all_size; int index = terms[i].front(); terms[i].pop_front(); if (terms[i].empty()) single_terms.push_back(i); for (int k = 1; k <= index; k ++){ index_input_exp += otherSize[k - 1] * NRVARS; index_input_coeff += otherSize[k - 1]; } sizes_aux[i] = otherSize[index]; p_aux.push_back(make_pair(e_input + index_input_exp, c_input + index_input_coeff)); } vector<bool> firstmult(nr_terms, true); vector<uint> dimRes(nr_terms, 0); vector<uint> is_zero(nr_terms, 0); double inite = omp_get_wtime(); cout << "init + copy " << 1000 * (inite - inits) << endl; double start = omp_get_wtime(); for (int i = 0; i < single_terms.size(); i ++) get_keys(keys + single_terms[i] * result_size, p_aux[single_terms[i]].first, sizes_aux[single_terms[i]], streams[single_terms[i]]); unsigned int dimResult, dimInput, dimAux; while (iterations > 0){ //launch kernels from each term #pragma omp parallel for private(index_input_exp, index_input_coeff, index_result_exp, index_result_coeff, dimInput, dimAux, dimResult) shared(iterations, indices, terms, dimRes, p_aux, p_result, stencil, 
order, streams, input_coeff) num_threads(32) for (int i = 0; i < nr_terms; i ++){ if (!terms[i].empty()) { int index = terms[i].front(); index_input_exp = i * all_size * NRVARS; index_input_coeff = i * all_size; for (int k = 1; k <= index; k ++){ index_input_exp += otherSize[k - 1] * NRVARS; index_input_coeff += otherSize[k - 1]; } //cudaStreamSynchronize(streams[i]); dimInput = otherSize[index]; dimAux = sizes_aux[i]; dimResult = dimInput * dimAux; dimRes[i] = dimResult; if (terms[i].size() == 1){ multiply_truncate_key(p_result[i].first, e_keys + i * result_size, p_aux[i].first, e_input + index_input_exp, p_result[i].second, p_aux[i].second, c_input + index_input_coeff, dimResult, dimAux, dimInput, order, stencil + i * stencil_size, input_coeff[i], streams[i]); } else { multiply_truncate(p_result[i].first, p_aux[i].first, e_input + index_input_exp, p_result[i].second, p_aux[i].second, c_input + index_input_coeff, dimResult, dimAux, dimInput, order, stencil + i * stencil_size, streams[i]); } if (stencil_size <= 4096) scanExclusiveShort(indices + i * stencil_size, stencil + i * stencil_size, stencil_size, streams[i]); else scanExclusiveLarge(indices + i * stencil_size, stencil + i * stencil_size, stencil_size, streams[i]); checkCuda(cudaMemcpyAsync(&sizes_aux[i], indices + i * stencil_size + dimResult, sizeof(uint), cudaMemcpyDeviceToHost, streams[i])); cudaStreamSynchronize(streams[i]); if (firstmult[i]){ firstmult[i] = false; index_result_exp = i * result_size * NRVARS; index_result_coeff = i * result_size; p_aux[i] = make_pair(e_aux + index_result_exp, c_aux + index_result_coeff); } //cout << sizes_aux[i] << endl; //kernel for truncation // uint dimGrid = dimRes[i]/THREADBLOCK_SIZE; //if (dimRes[i] % THREADBLOCK_SIZE != 0) // dimGrid ++; if (terms[i].size() == 1) compact_pol_key(e_keys + i * result_size, p_result[i].second, stencil + i * stencil_size, indices + i * stencil_size, dimResult, sizes_aux[i], keys + i * result_size, p_aux[i].second, streams[i]); 
//stream_compactation_key<NRVARS><<<dimGrid, THREADBLOCK_SIZE, 0, streams[i]>>>(e_keys + i * result_size, p_result[i].second, stencil + i * stencil_size, indices + i * stencil_size, dimResult, sizes_aux[i], keys + i * result_size, p_aux[i].second); else compact_pol(p_result[i].first, p_result[i].second, stencil + i * stencil_size, indices + i * stencil_size, dimResult, sizes_aux[i], p_aux[i].first, p_aux[i].second, streams[i]); //stream_compactation<THREADBLOCK_SIZE, NRVARS><<<dimGrid, THREADBLOCK_SIZE, 0, streams[i]>>>(p_result[i].first, p_result[i].second, stencil + i * stencil_size, indices + i * stencil_size, dimResult, sizes_aux[i], p_aux[i].first, p_aux[i].second); terms[i].pop_front(); #pragma omp critical iterations --; } } } double endt = omp_get_wtime(); cout << "iter " << 1000 * (endt - start) << endl; cudaThreadSynchronize(); double startcopy = omp_get_wtime(); uint size_keys = 0; for (int i = 0; i < nr_terms; i ++) size_keys += sizes_aux[i]; cout << "total " << size_keys << endl; thrust::device_ptr<unsigned long long> e_keys_ptr(e_keys); thrust::device_ptr<unsigned long long> keys_ptr(keys); int index_keys = 0; thrust::device_ptr<double> c_result_ptr(c_result); thrust::device_ptr<double> c_aux_ptr(c_aux); for (int i = 0; i < nr_terms; i ++){ thrust::copy(keys_ptr + i * result_size, keys_ptr + i * result_size + sizes_aux[i], e_keys_ptr + index_keys); thrust::copy(c_aux_ptr + i * result_size, c_aux_ptr + i * result_size + sizes_aux[i], c_result_ptr + index_keys); index_keys += sizes_aux[i]; } double endcopy = omp_get_wtime(); cout << "copy " << 1000 * (endcopy - startcopy) << endl; double startsort = omp_get_wtime(); thrust::sort_by_key(e_keys_ptr, e_keys_ptr + size_keys, c_result_ptr); double endsort = omp_get_wtime(); cout << "sort " << 1000 * (endsort - startsort) << endl; //reduce by key double startreduce = omp_get_wtime(); thrust::pair<thrust::device_ptr<unsigned long long>, thrust::device_ptr<double> > end; end = thrust::reduce_by_key(e_keys_ptr, 
e_keys_ptr + size_keys, c_result_ptr, keys_ptr, c_aux_ptr); size_keys = end.first - keys_ptr; double endreduce = omp_get_wtime(); cout << "reduce " << 1000 * (endreduce - startreduce) << endl; cout << "size " << size_keys << endl; get_exponents(e_result, keys, size_keys); //coeffs are in c_aux_ptr for (int i = 0; i < nr_terms; ++i) cudaStreamDestroy(streams[i]); checkCuda(cudaMemcpy(final_exponents, e_result, size_keys * sizeof(int) * NRVARS, cudaMemcpyDeviceToHost)); checkCuda(cudaMemcpy(final_coeffs, c_aux, size_keys * sizeof(double), cudaMemcpyDeviceToHost)); //free memory cudaFree(e_input); cudaFree(c_input); cudaFree(e_result); cudaFree(c_result); cudaFree(e_aux); cudaFree(c_aux); cudaFree(e_keys); cudaFree(keys); cudaFree(stencil); cudaFree(indices); return size_keys; }
1de0baf1f33dbcc4c9dd5e9135a9cd4fdbc7ecb5.hip
// !!! This is a file automatically generated by hipify!!! #include "random.h" #include <stdio.h> #include <assert.h> const int THREADS_PER_CORE = 3; int Config::blocks(int device){ return this->smCount(device) * THREADS_PER_CORE; } int Config::threadsPerBlock(int device){ int threads = this->totalNumThreads(device); int blocks = this->blocks(device); assert(!(threads%blocks)); return threads/blocks; } int Config::totalNumThreads(int device){ return this->cudaCores(device) * THREADS_PER_CORE; } int calculate_warps_per_sm(hipDeviceProp_t prop){ int warp_per_sm; //https://en.wikipedia.org/wiki/CUDA#Version_features_and_specifications //table: architecture specifications of the compute capability major.minor version switch(prop.major){ case 1: warp_per_sm=1; break; case 2: warp_per_sm=2; break; case 3: warp_per_sm=4; break; case 5: warp_per_sm=4; break; case 6: if(prop.minor == 0) { warp_per_sm = 2; } else { warp_per_sm = 4; } break; case 7: warp_per_sm = 4; break; default: printf("Warning: default warp_per_sm = 4 used"); warp_per_sm = 4; break; } return warp_per_sm; } Config::Config(){ int count = 0; hipGetDeviceCount(&count); this->device_count=count; hipDeviceProp_t props[count]; int wps[count]; for(int ii =0; ii<count; ii++){ hipGetDeviceProperties(&props[ii], ii); wps[ii] = calculate_warps_per_sm(props[ii]); } this->prop = props; this->warp_per_sm = wps; } int Config::deviceCount() { return this->device_count; } int Config::threadsPerWarp(int device){ return this->prop[device].warpSize; } int Config::smCount(int device){ return this->prop[device].multiProcessorCount; } int Config::warpPerSm(int device){ return this->warp_per_sm[device]; } int Config::cudaCores(int device){ return this->warpPerSm(device) * this->threadsPerWarp(device) * this->smCount(device); } void Config::printName(int device){ printf("GPU #%d: %s\n", device, this->prop[device].name); }
1de0baf1f33dbcc4c9dd5e9135a9cd4fdbc7ecb5.cu
#include "random.h" #include <stdio.h> #include <assert.h> const int THREADS_PER_CORE = 3; int Config::blocks(int device){ return this->smCount(device) * THREADS_PER_CORE; } int Config::threadsPerBlock(int device){ int threads = this->totalNumThreads(device); int blocks = this->blocks(device); assert(!(threads%blocks)); return threads/blocks; } int Config::totalNumThreads(int device){ return this->cudaCores(device) * THREADS_PER_CORE; } int calculate_warps_per_sm(cudaDeviceProp prop){ int warp_per_sm; //https://en.wikipedia.org/wiki/CUDA#Version_features_and_specifications //table: architecture specifications of the compute capability major.minor version switch(prop.major){ case 1: warp_per_sm=1; break; case 2: warp_per_sm=2; break; case 3: warp_per_sm=4; break; case 5: warp_per_sm=4; break; case 6: if(prop.minor == 0) { warp_per_sm = 2; } else { warp_per_sm = 4; } break; case 7: warp_per_sm = 4; break; default: printf("Warning: default warp_per_sm = 4 used"); warp_per_sm = 4; break; } return warp_per_sm; } Config::Config(){ int count = 0; cudaGetDeviceCount(&count); this->device_count=count; cudaDeviceProp props[count]; int wps[count]; for(int ii =0; ii<count; ii++){ cudaGetDeviceProperties(&props[ii], ii); wps[ii] = calculate_warps_per_sm(props[ii]); } this->prop = props; this->warp_per_sm = wps; } int Config::deviceCount() { return this->device_count; } int Config::threadsPerWarp(int device){ return this->prop[device].warpSize; } int Config::smCount(int device){ return this->prop[device].multiProcessorCount; } int Config::warpPerSm(int device){ return this->warp_per_sm[device]; } int Config::cudaCores(int device){ return this->warpPerSm(device) * this->threadsPerWarp(device) * this->smCount(device); } void Config::printName(int device){ printf("GPU #%d: %s\n", device, this->prop[device].name); }
6720309f41b4e03057bc15e0d5f41be27e67aa1a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013-2015 Guoguo Chen // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cudamatrix/cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. 
} // the result return buffer[0]; } template<typename Real> __device__ static Real _min_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (min) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp < buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two } // the result return buffer[0]; } template<typename Real> __device__ static Real _max_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (max) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (get index of maximum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. 
if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread Real temp = -1e20; if(threadIdx.x+halfPoint < nTotalThreads) { temp = val[idx[threadIdx.x + halfPoint]]; } if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return idx[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. 
template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = j * mat_dim.stride + i, index2 = j * mat2_row_stride + i * mat2_col_stride; if (i < mat_dim.cols && j < mat_dim.rows) { mat[index] = alpha * vec[j] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dmat.cols && j < dmat.rows) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = j * dmat.stride + i; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { // we interpret these indexes oppositely from normal, but it doesn't // matter as it's invoked in a symmetric way. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index. 
int32_cuda index_out = i + j * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.cols && j < d_out.rows) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index out int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index out int32_cuda index_out = i + j * d_out.stride; int32_cuda index_in = j + i * d_in.stride; if (j < d_out.rows && i < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].row * d_out.stride + smat_in[smat_index].column; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_trans(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].column * d_out.stride + smat_in[smat_index].row; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat_trans(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].row * mat_d_in.stride + smat_in[smat_index].column; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static 
void _trace_mat_smat(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].column * mat_d_in.stride + smat_in[smat_index].row; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _transpose_matrix(Real* mat, MatrixDim d) { // Transposes a square matrix in-place. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j >= i || i >= d.rows) { return; } // Only half the threads act. int32_cuda index_a = j + i * d.stride, index_b = i + j * d.stride; Real a = mat[index_a], b = mat[index_b]; mat[index_a] = b; mat[index_b] = a; } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i*d.stride; if ( i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int 
dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = 
mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] / A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if ( i < dst_d.cols && j < dst_d.rows ) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = (a > b ? a : b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; 
y[dst_index] *= x[src_index]; } } /// y is the derivative we will output; vec is the input we're computing /// the group p-norm on, "norm" is the previously computed group p-norm. template<typename Real> __global__ static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. norm_element = norm[src_index]; // this is the pnorm Real vec_element_sign = (vec_element > 0 ? 1 : -1); Real ans; if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin. else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) * pow(norm_element, 1 - power); deriv[dst_index] = ans; } } /// deriv is the derivative we will output; vec is the input we're computing /// the group max on, "maxv" is the previously computed group max. template<typename Real> __global__ static void _calc_group_max_deriv(Real *deriv, const Real *vec, const Real *maxv, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. max_element = maxv[src_index]; // this is the max value Real ans = (max_element == vec_element ? 1.0 : 0.0); deriv[dst_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). 
template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (j >= d.rows ) return; //invert divider in shared memory __shared__ Real inv[16]; if(threadIdx.x==0) { inv[threadIdx.y] = 1.0/vec_div[j]; } __syncthreads(); //multiply elements if (i < d.cols && j < d.rows) mat[index] *= inv[threadIdx.y]; } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha * src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_blocks(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda 
q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.rows + q * d.cols] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_blocks_trans(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.cols + q * d.rows] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, a_index = i + j*stride_a, b_index = i + j*stride_b, c_index = i + j*stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, // this function does S = beta S + alpha * L^T L. This is used in PSD matrix inversion. // The i index is the row of the destination S and the j the column (although of // course the output is symmetric so it doesn't matter in a sense). The main point // of this is to make use of various symmetries and zero-ness. 
template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*col[j] + beta*dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*dmat.stride; int32_cuda index2 = i + j*dmask.stride; if ( i < dmat.cols && j < dmat.rows ) if(mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real 
*mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * mat_dim.stride, index2 = i * mat2_col_stride + j * mat2_row_stride; if (j < mat_dim.rows && i < mat_dim.cols) mat[index] = alpha * mat2[index2] * vec[i] + beta * mat[index]; } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j*dim.stride; int32_cuda srcA_index = i + j*srcA_stride; int32_cuda srcB_index = i + j*srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index] ; } } /* * CuVector */ // very limited application! template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { Real ratio = a[i] / param_3; if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) { *flag = 1; return; } if ( ratio < param_1 ) { Real factor = ((param_1/ratio) > param_2) ? param_2 : (param_1/ratio); v[i] = v[i] / factor; } else if ( ratio > param_1 ) { Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1); v[i] = v[i] * factor; } } } template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". 
the dimension of v_in should be equal to the #columns of m_out. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index. int j = blockIdx.y * blockDim.y + threadIdx.y; // row index. if (i < d.cols && j < d.rows) { int index = i + j * d.stride; m_out[index] = v_in[i]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (float) v_in[i]; } } template<typename Real> __global__ static void _vec_min(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= CU1DBLOCK) return; __shared__ Real row_data[CU1DBLOCK]; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real min = 1.0 / 0.0; // infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j < min) min = v_j; } row_data[i] = min; __syncthreads(); //get the sum *value = _min_reduce(row_data); } template<typename Real> __global__ static void _vec_max(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.y > 0) return; __shared__ Real row_data[CU1DBLOCK]; if(i >= CU1DBLOCK) return; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real max = -1.0 / 0.0; // -infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j > max) max = v_j; } row_data[i] = max; __syncthreads(); //get the sum *value = _max_reduce(row_data); } // _trace_mat_mat expects to be called with 1 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 0]. 
// Partial trace(A * B): each thread sums A[row][col] * B[col][row] over its
// contiguous chunk of the element range, then a shared-memory reduce writes
// one partial sum per block into value[blockIdx.x].
// NOTE(review): the guard uses '>' where '>=' was presumably intended; it is
// dead code under the documented launch config (num_blocks blocks of
// CU1DBLOCK threads), but if it ever fired, the returned threads would skip
// the shared-memory write and the __syncthreads() below -- confirm launches.
template<typename Real, int num_blocks>
__global__
static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA,
                           int B_stride, Real* value) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return;

  int num_elements = dA.rows * dA.cols,
      num_threads = CU1DBLOCK * num_blocks;
  int block_size = (num_elements + num_threads - 1) / num_threads;
  int loop_start = i * block_size, loop_end = (i + 1) * block_size;
  if (loop_end > num_elements)
    loop_end = num_elements;

  Real sum = 0.0;
  for (int j = loop_start; j < loop_end; j++) {
  // for (int j = i; j < num_elements; j += num_threads) {
    int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is
                                              // col-index in A; in B, it's reversed.
    int index_A = col + row * dA.stride,
        index_B = row + col * B_stride;
    sum += A[index_A] * B[index_B];
  }
  __shared__ Real row_data[CU1DBLOCK];

  row_data[threadIdx.x] = sum;

  __syncthreads();

  Real ans = _sum_reduce(row_data);
  if (threadIdx.x == 0)
    value[blockIdx.x] = ans;
}

// _trace_mat_mat_trans expects to be called with 4 blocks, each of dimension
// CU1DBLOCK.  Each block outputs a partial sum to value[blockIdx.x],
// i.e. value[0 through 3].
// Partial trace(A * B^T): sums A[row][col] * B[row][col] with a grid-stride
// style loop (same '>' guard caveat as _trace_mat_mat above).
template<typename Real, int num_blocks>
__global__
static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA,
                                 int B_stride, Real* value) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return;

  int num_elements = dA.rows * dA.cols,
      num_threads = CU1DBLOCK * num_blocks;
  // int block_size = (num_elements + num_threads - 1) / num_threads;
  // int loop_start = i * block_size, loop_end = (i + 1) * block_size;
  // if (loop_end > num_elements)
  //   loop_end = num_elements;

  Real sum = 0.0;
  // for (int j = loop_start; j < loop_end; j++) {
  for (int j = i; j < num_elements; j += num_threads) {
    int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is
                                              // col-index in A; in B, it's reversed.
    int index_A = col + row * dA.stride,
        index_B = col + row * B_stride;
    sum += A[index_A] * B[index_B];
  }
  __shared__ Real row_data[CU1DBLOCK];
  row_data[threadIdx.x] = sum;
  __syncthreads();
  Real ans = _sum_reduce(row_data);
  if (threadIdx.x == 0)
    value[blockIdx.x] = ans;
}

// Adds diag(M N) to v, where M and N are matrices.  We supply row_stride and
// col_stride arguments for M and N, and swapping them allows us to transpose
// those matrices.  Note: we imagine row-major indexing here, just like Kaldi
// and CBLAS (but unlike CUBLAS).
// This kernel expects the blockDim to be (CU1DBLOCK, 1) and the
// gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element.
// threads_per_element should be a power of 2.
template<typename Real>
__global__
static void _add_diag_mat_mat(
    Real alpha, Real* v, int v_dim, const Real* M, int M_cols,
    int M_row_stride, int M_col_stride, const Real *N, int N_row_stride,
    int N_col_stride, int threads_per_element, Real beta) {
  // we actually assume blockDim.x == CU1DBLOCK here.
  // Each diagonal element of v is processed by "threads_per_element" threads.
  __shared__ Real temp_data[CU1DBLOCK];
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int v_idx = i / threads_per_element,   // v_idx is the index into v that we are supposed to
      sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells
                                         // us which block of elements we sum up.
  if (v_idx < v_dim) {
    Real sum = 0.0;
    for (int j = sub_idx; j < M_cols; j += threads_per_element) {
      int M_index = v_idx * M_row_stride + j * M_col_stride,
          N_index = j * N_row_stride + v_idx * N_col_stride;
      sum += M[M_index] * N[N_index];
    }
    temp_data[threadIdx.x] = sum;
  }
  // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data
  // that we want to sum up.
  // The following is a tree-based reduction of the elements of temp_data from
  // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx".
  __syncthreads();
  int num_total_threads = threads_per_element;
  while (num_total_threads > 1) {
    int half_point = ((1 + num_total_threads) >> 1);
    if (sub_idx < half_point) {
      Real temp = 0.0;
      if (sub_idx + half_point < num_total_threads) {
        temp = temp_data[threadIdx.x + half_point];
      }
      temp_data[threadIdx.x] += temp;
    }
    __syncthreads();
    num_total_threads = half_point;
  }
  // Lane 0 of each group of threads_per_element threads holds the full
  // dot-product; apply the alpha/beta update to v.
  if (sub_idx == 0 && v_idx < v_dim) {
    v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x];
  }
}

// Element-wise: v = alpha * (x .* y) + beta * v.
template<typename Real>
__global__
static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y,
                         Real beta, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  // if (blockIdx.y > 0) return;
  if (i < dim)
    v[i] = alpha * x[i] * y[i] + beta * v[i];
}

// Copy (with cast to double) column "col" of mat into the vector v.
template<typename Real>
__global__
static void _copy_col_from_mat_df(double* v, int col, const Real* mat,
                                  MatrixDim dmat, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda index = col + i * dmat.stride;
  // if (blockIdx.y > 0) return;
  if (i < dim)
    v[i] = (double) mat[index];
}

// Copy (with cast to float) column "col" of mat into the vector v.
template<typename Real>
__global__
static void _copy_col_from_mat_fd(float* v, int col, const Real* mat,
                                  MatrixDim dmat, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda index = col + i * dmat.stride;
  // if (blockIdx.y > 0) return;
  if (i < dim)
    v[i] = (float) mat[index];
}

// In-place exponential of each element of v.
template<typename Real>
__global__
static void _vec_apply_exp(Real* v, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  // if (blockIdx.y > 0) return;
  if (i < dim) {
    v[i] = exp(v[i]);
  }
}

// In-place natural log of each element of v; sets *flag = 1 and skips the
// element if a negative value is encountered (caller checks the flag).
template<typename Real>
__global__
static void _vec_apply_log(Real* v, Real* flag, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  // if (blockIdx.y > 0) return;
  if (i < dim) {
    if (v[i] < 0) {
      *flag = 1;
      return;
    }
    v[i] = log(v[i]);
  }
}

// Accumulates the supervised-training objective over s (row, column, weight)
// entries in x: t[0] gets sum of weight*log(prob), t[1] the total weight,
// and z2 accumulates weight/prob at each referenced (row, column) position.
// Single-block kernel: i = threadIdx.x, with CU1DBLOCK threads splitting the
// s entries as evenly as possible.
template<typename Real>
__global__
static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z,
                                 MatrixDim d, Real* z2, MatrixDim d2,
                                 Real* t) {
  int i = threadIdx.x;
  __shared__ Real tot_objf[CU1DBLOCK];
  __shared__ Real tot_weight[CU1DBLOCK];

  Real tmp_weight_sum = 0;
  Real tmp_tot_objf = 0;
  int size = s / CU1DBLOCK; //the least size in a loop (later part)
  int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1

  int loop_start;
  int loop_end;
  if(i < threshold) {
    loop_start = i * (size + 1);
    loop_end = (i+1) * (size + 1);
  } else {
    loop_start = threshold + i*size;
    loop_end = threshold + (i+1)*size;
  }
  for(int j = loop_start; j< loop_end; j++) {
    int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) );
    int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int));
    Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) ) + 2 * sizeof(int));
    tmp_weight_sum += weight;
    Real this_prob = *(z + m * d.stride + label);
    tmp_tot_objf += weight * log(this_prob);

    *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here....
  }
  tot_objf[i] = tmp_tot_objf;
  tot_weight[i] = tmp_weight_sum;
  __syncthreads();
  *t = _sum_reduce(tot_objf);
  __syncthreads();
  *(t+1) = _sum_reduce(tot_weight);
  return;
}

// data[x[i].row][x[i].column] += alpha * x[i].weight, one thread per element.
template<typename Real>
__global__
static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha,
                                      MatrixElement<Real>* x,
                                      int num_elements) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= num_elements)
    return;
  data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight;
}

// data[indices[i]] += alpha * x[i] for s (row, col) index pairs.
template<typename Real>
__global__
static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha,
                                            const Int32Pair* indices,
                                            const Real* x, int s,
                                            Real* data) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= s)
    return;
  int data_i = indices[i].first * dim.stride + indices[i].second;
  data[data_i] += alpha * x[i];
}

// Gathers output[i] = data[indices[i]] for (row, col) index pairs.
template<typename Real>
__global__
static void _matrix_lookup(const Real *data, MatrixDim dim,
                           const Int32Pair *indices, int indices_size,
                           Real *output) {
  int ind = blockIdx.x * blockDim.x +
            threadIdx.x;
  if (ind >= indices_size)
    return;
  int data_ind = indices[ind].first * dim.stride + indices[ind].second;
  output[ind] = data[data_ind];
}

// mask = 1.0 where mat1 == mat2 element-wise, else 0.0.
template<typename Real>
__global__
static void _equal_element_mask(const Real *mat1, const Real *mat2,
                                Real *mask, MatrixDim mat1_dim,
                                int mat2_stride, int mask_stride) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row
  int32_cuda index_mat1 = i + j*mat1_dim.stride;
  int32_cuda index_mat2 = i + j*mat2_stride;
  int32_cuda index_mask = i + j*mask_stride;
  if (i < mat1_dim.cols && j < mat1_dim.rows)
    mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0);
}

// Single-block sum of v[0], v[inc], ..., written to *sum via _sum_reduce.
template<typename Real>
__global__
static void _vec_sum(Real *v, Real *sum, int dim, int inc) {
  int i = threadIdx.x;
  __shared__ Real row_data[CU1DBLOCK];

  if (i >= CU1DBLOCK) return;

  Real tmp_sum = 0;
  int size = dim / CU1DBLOCK; //the least size in a loop (later part)
  int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1

  int loop_start;
  int loop_end;
  if(i < threshold) {
    loop_start = i * (size + 1);
    loop_end = (i+1) * (size + 1);
  } else {
    loop_start = threshold + i * size;
    loop_end = threshold + (i+1) * size;
  }
  for(int j = loop_start; j< loop_end; j++) {
    tmp_sum += v[j * inc];
  }

  row_data[threadIdx.x] = tmp_sum;
  __syncthreads();
  *sum = _sum_reduce(row_data);
}

// Partial sums: block blockIdx.x reduces its "size"-element chunk of v
// into g[blockIdx.x] (the host finishes the reduction over g).
template<typename Real>
__global__
static void _pvec_sum(Real* v, Real* g, int dim, int size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int start = size * i;
  int end = start + size;
  if (end > dim) end = dim;
  __shared__ Real row_data[CU1DBLOCK];
  Real sum = 0;
  for (int j = start; j < end; j++)
    sum += v[j];
  row_data[threadIdx.x] = sum;
  __syncthreads();
  g[blockIdx.x] = _sum_reduce(row_data);
}

// Clamp v[i] from below at floor_val; count[i] records 1 if clamped, else 0.
template<typename Real>
__global__
static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;

  if ( i < dim) {
    if ( v[i] < floor_val) {
      v[i] = floor_val;
      count[i] = 1;
    } else {
      count[i] = 0;
    }
  }
}

// Clamp v[i] from above at ceiling_val; count[i] records 1 if clamped.
template<typename Real>
__global__
static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count,
                               int dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;

  if ( i < dim) {
    if ( v[i] > ceiling_val) {
      v[i] = ceiling_val;
      count[i] = 1;
    } else {
      count[i] = 0;
    }
  }
}

// In-place element-wise power, with fast paths for powers 1, 2 and 0.5
// (the 0.5 path leaves negative entries untouched).
template<typename Real>
__global__
static void _apply_pow(Real* mat, Real power, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  int index = i + j * d.stride;
  if (i < d.cols && j < d.rows) {
    if (power == 1.0)
      return;
    if (power == 2.0) {
      mat[index] = mat[index] * mat[index];
    } else if (power == 0.5) {
      if (!(mat[index] >= 0.0))
        return;
      mat[index] = sqrt(mat[index]);
    } else {
      mat[index] = pow(mat[index], power);
    }
  }
}

// In-place |x|^power; when include_sign is true the sign of the original
// element is re-applied to the result.
template<typename Real>
__global__
static void _apply_pow_abs(Real* mat, Real power, bool include_sign,
                           MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  int index = i + j * d.stride;
  if (i < d.cols && j < d.rows) {
    if (include_sign == true && mat[index] < 0) {
      if (power == 1.0)
        mat[index] = -std::abs(mat[index]);
      if (power == 2.0) {
        mat[index] = -mat[index] * mat[index];
      } else if (power == 0.5) {
        mat[index] = -sqrt(std::abs(mat[index]));
      } else {
        mat[index] = -pow(std::abs(mat[index]), power);
      }
    } else {
      if (power == 1.0)
        mat[index] = std::abs(mat[index]);
      if (power == 2.0) {
        mat[index] = mat[index] * mat[index];
      } else if (power == 0.5) {
        mat[index] = sqrt(std::abs(mat[index]));
      } else if (power < 0.0 && mat[index] == 0.0) {
        mat[index] = 0.0;
      } else {
        mat[index] = pow(std::abs(mat[index]), power);
      }
    }
  }
}

// In-place step function: mat <- (mat > 0 ? 1 : 0).
template<typename Real>
__global__
static void _apply_heaviside(Real* mat, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  int index = i + j * d.stride;

  if (i < d.cols && j <
      d.rows)
    mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0);
}

// In-place clamp-from-below of every element at floor_val.
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  int index = i + j * d.stride;

  if (i < d.cols && j < d.rows) {
    if (mat[index] < floor_val)
      mat[index] = floor_val;
  }
}

// dst column i gets src column reorder[i]; a negative reorder entry means
// "fill with zero".
template<typename Real>
__global__
static void _copy_cols(Real* dst, const Real *src,
                       const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
                       int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  if (i < dst_dim.cols && j < dst_dim.rows) {
    int index = reorder[i], dst_index = j * dst_dim.stride + i;
    if (index >= 0) {
      int src_index = j * src_stride + reorder[i];
      Real val = src[src_index];
      dst[dst_index] = val;
    } else {
      dst[dst_index] = 0.0;
    }
  }
}

// dst column i accumulates src column reorder[i]; negative entries are
// skipped (dst left unchanged).
template<typename Real>
__global__
static void _add_cols(Real* dst, const Real *src,
                      const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
                      int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  if (i < dst_dim.cols && j < dst_dim.rows) {
    int index = reorder[i], dst_index = j * dst_dim.stride + i;
    if (index >= 0) {
      int src_index = j * src_stride + index;
      Real val = src[src_index];
      dst[dst_index] += val;
    }
  }
}

// dst row j gets src row reorder[j]; a negative reorder entry means
// "fill with zero".
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real *src,
                       const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
                       int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  if (i < dst_dim.cols && j < dst_dim.rows) {
    int index = reorder[j], dst_index = j * dst_dim.stride + i;
    if (index >= 0) {
      int src_index = reorder[j] * src_stride + i;
      Real val = src[src_index];
      dst[dst_index] = val;
    } else {
      dst[dst_index] = 0;
    }
  }
}

// dst row j gets the row pointed to by src[j]; a NULL pointer means
// "fill with zero".
template<typename Real>
__global__
static void
_copy_rows(Real* dst, const Real *const *src, MatrixDim dst_dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  if (i < dst_dim.cols && j < dst_dim.rows) {
    int dst_index = j * dst_dim.stride + i;
    const Real *pointer = src[j];
    if (pointer != NULL) {
      dst[dst_index] = pointer[i];
    } else {
      dst[dst_index] = 0;
    }
  }
}

// Scatter: row j of src is written to the row pointed to by dst[j]
// (NULL destination rows are skipped).
template<typename Real>
__global__
static void _copy_to_rows(Real* const* dst, const Real *src,
                          MatrixDim src_dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  if (i < src_dim.cols && j < src_dim.rows) {
    Real *pointer = dst[j];
    if (pointer != NULL) {
      pointer[i] = src[j * src_dim.stride + i];
    }
  }
}

// dst row j += alpha * (src row reorder[j]); negative entries skipped.
template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real *src,
                      const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
                      int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  if (i < dst_dim.cols && j < dst_dim.rows) {
    int dst_index = j * dst_dim.stride + i;
    if (reorder[j] >= 0) {
      int src_index = reorder[j] * src_stride + i;
      dst[dst_index] += alpha * src[src_index];
    }
  }
}

// dst row j += alpha * (row pointed to by src[j]); NULL pointers skipped.
template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real *const *src,
                      MatrixDim dst_dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  if (i < dst_dim.cols && j < dst_dim.rows) {
    int dst_index = j * dst_dim.stride + i;
    if (src[j] != NULL) {
      dst[dst_index] += alpha * src[j][i];
    }
  }
}

// Scatter-add: the row pointed to by dst[j] += alpha * (src row j);
// NULL destination rows are skipped.
template<typename Real>
__global__
static void _add_to_rows(Real alpha, Real* const* dst, const Real *src,
                         MatrixDim src_dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  if (i < src_dim.cols && j < src_dim.rows) {
    if (dst[j] != NULL) {
      dst[j][i] += alpha * src[j *
                               src_dim.stride + i];
    }
  }
}

// In-place clamp-from-above of every element at ceiling_val.
template<typename Real>
__global__
static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i + j * d.stride;

  if (i < d.cols && j < d.rows ) {
    if (mat[index] > ceiling_val)
      mat[index] = ceiling_val;
  }
}

// In-place element-wise reciprocal: data <- 1 / data.
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i + j*d.stride;
  if (i < d.cols && j < d.rows)
    data[index] = 1.0/data[index];
}

// matrix-wise, do data = alpha * A * B^T + beta * data,
// where B is a block matrix.  One thread per (row of A, block of B) pair.
template<typename Real>
__global__
static void _add_mat_blockmat_trans(Real *data, MatrixDim dim,
                                    const Real *A_data, int A_num_rows,
                                    int A_num_cols, int A_row_stride,
                                    int A_col_stride,
                                    const CuBlockMatrixData *B_cu_data,
                                    int B_num_blocks, Real alpha, Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks)
    return;
  const CuBlockMatrixData &cu_data = B_cu_data[j];

  // BT means B transposed.
  int BT_row_start = cu_data.col_offset,
      BT_col_start = cu_data.row_offset,
      BT_num_rows = cu_data.matrix_dim.cols,
      BT_num_cols = cu_data.matrix_dim.rows,
      BT_col_stride = cu_data.matrix_dim.stride;
  const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void;
  // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).

  for (int k = 0; k < BT_num_cols; k++) {
    const Real *this_BT_col = B_data + k * BT_col_stride;
    const Real *this_A_row = A_data + i * A_row_stride +
        BT_row_start * A_col_stride;
    // this_A_row points to the element A[i][BT_row_start], it's really just
    // part of this row of A.
    Real sum = 0.0;
    for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
      sum += this_BT_col[l] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + BT_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}

// Same as above without the transpose: data = alpha * A * B + beta * data,
// where B is a block matrix.
template<typename Real>
__global__
static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data,
                              int A_num_rows, int A_num_cols,
                              int A_row_stride, int A_col_stride,
                              const CuBlockMatrixData *B_cu_data,
                              int B_num_blocks, Real alpha, Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks)
    return;
  const CuBlockMatrixData &block_data = B_cu_data[j];

  int B_row_start = block_data.row_offset,
      B_col_start = block_data.col_offset,
      B_num_rows = block_data.matrix_dim.rows,
      B_num_cols = block_data.matrix_dim.cols,
      B_row_stride = block_data.matrix_dim.stride;
  const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void;
  // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).

  for (int k = 0; k < B_num_cols; k++) {
    const Real *this_B_col = B_data + k;
    const Real *this_A_row = A_data + i * A_row_stride +
        B_row_start * A_col_stride;
    // this_A_row points to the element A[i][B_row_start], it's really just
    // part of this row of A.
    Real sum = 0.0;
    for (int l = 0; l < B_num_rows; l++) // l indexes rows of B.
      sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + B_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}

// For a block matrix B, does B = alpha * C * D + beta * B.
// the (x,y,z) indices are the block index, then the row
// and column indices within the block.  Note: transposition of C and D
// is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride),
// so it's invisible to this code.  The num-cols and num-rows of C and D
// are only provided to the extent that they are not already determined
// by other quantities.
template<typename Real>
__global__
static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks,
                               const Real *C_data, int C_num_cols,
                               int C_row_stride, int C_col_stride,
                               const Real *D_data, int D_row_stride,
                               int D_col_stride, Real alpha, Real beta) {
  int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B.
  int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block
  int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block
  if (b >= num_blocks) return;
  const CuBlockMatrixData &block_data = B_cu_data[b];
  if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols)
    return; // we're outside the dimensions of the b'th block.
  // B_elem is the element of B we're writing to.
  Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) +
      i * block_data.matrix_dim.stride + j;
  Real B_val = *B_elem;
  // B_row and B_col are the (row, col) index into the full matrix B.
  int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j;
  const Real *C_row_data = C_data + C_row_stride * B_row,
      *D_col_data = D_data + D_col_stride * B_col;
  Real sum = 0.0;
  // Dot product of row B_row of C with column B_col of D.
  for (int k = 0; k < C_num_cols; k++) {
    sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride];
  }
  *B_elem = alpha * sum + beta * B_val;
}

// data = alpha * A * B^T + beta * data with block-matrix B; same algorithm
// as _add_mat_blockmat_trans earlier in this file.
template<typename Real>
__global__
static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim,
                                         const Real *A_data, int A_num_rows,
                                         int A_num_cols, int A_row_stride,
                                         int A_col_stride,
                                         const CuBlockMatrixData *B_cu_data,
                                         int B_num_blocks, Real alpha,
                                         Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks)
    return;
  const CuBlockMatrixData &cu_data = B_cu_data[j];

  // BT means B transposed.
  int BT_row_start = cu_data.col_offset,
      BT_col_start = cu_data.row_offset,
      BT_num_rows = cu_data.matrix_dim.cols,
      BT_num_cols = cu_data.matrix_dim.rows,
      BT_col_stride = cu_data.matrix_dim.stride;
  const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void;
  // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).

  for (int k = 0; k < BT_num_cols; k++) {
    const Real *this_BT_col = B_data + k * BT_col_stride;
    const Real *this_A_row = A_data + i * A_row_stride +
        BT_row_start * A_col_stride;
    // this_A_row points to the element A[i][BT_row_start], it's really just
    // part of this row of A.
    Real sum = 0.0;
    for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
      sum += this_BT_col[l] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + BT_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}

// data[row][col] = sum of src row "row" over the half-open column range
// [indices[col].first, indices[col].second).
template<typename Real>
__global__
static void _sum_column_ranges(Real *data, MatrixDim dim,
                               const Real *src_data, MatrixDim src_dim,
                               const Int32Pair *indices) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= dim.rows || col >= dim.cols)
    return;
  int dst_index = row * dim.stride + col,
      src_start_index = row * src_dim.stride + indices[col].first,
      src_end_index = row * src_dim.stride + indices[col].second;
  Real sum = 0.0;
  for (int index = src_start_index; index < src_end_index; index++)
    sum += src_data[index];
  data[dst_index] = sum;
}

// data[row][col] += sum of src[r][col] for r in the half-open row range
// [indexes[row].first, indexes[row].second).
template<typename Real>
__global__
static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data,
                            MatrixDim src_dim, const Int32Pair *indexes) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= dim.rows || col >= dim.cols)
    return;
  int dst_index = row * dim.stride + col;
  int src_index_start = indexes[row].first,
      src_index_end = indexes[row].second;
  for (int row_index = src_index_start; row_index < src_index_end;
       row_index++)
    data[dst_index] +=
        src_data[row_index * src_dim.stride + col];
}

// Soft-hinge (softplus): y = log(1 + exp(x)), with a linear approximation
// for large x to avoid overflowing exp().
template<typename Real>
__global__
static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride, src_index = i + j*src_stride;
  // compute the function y[index] = log(1 + exp(x[index]))
  if(i < d.cols && j < d.rows) {
    Real val = x[src_index], result;
    if (val >= 10.0) result = val; // function approaches y=x as x gets large
    else result = log1p(exp(val));
    y[dst_index] = result;
  }
}

// Group p-norm: y[i][j] = (sum over the j'th group of group_size inputs of
// |x|^power)^(1/power).  If the direct computation produced NaN (e.g. due
// to overflow in pow), it is redone with the inputs scaled by the group's
// largest absolute value for numerical stability.
template<typename Real>
__global__
static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride,
                         int group_size, Real power) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j < d.rows && i < d.cols) {
    int dst_index = i + j * d.stride;
    Real tmp = 0;
    int src_begin_index = i * group_size + j * src_stride;
    int src_end_index = src_begin_index + group_size;
    for (int src_index = src_begin_index; src_index < src_end_index;
         src_index ++) {
      tmp += pow(std::abs(x[src_index]), power);
    }
    tmp = pow(tmp, Real(1.0 / power));
    if (!isnan(tmp)) {
      y[dst_index] = tmp;
    } else {
      // Rescaled fallback path.
      Real max_value = x[src_begin_index], min_value = max_value;
      for (int src_index = src_begin_index + 1; src_index < src_end_index;
           src_index ++) {
        if (x[src_index] > max_value)
          max_value = x[src_index];
        if (x[src_index] < min_value)
          min_value = x[src_index];
      }
      tmp = 0.0;
      Real max_abs_value = (max_value > -min_value ?
                            max_value : -min_value); // let max_value be the
                                                     // largest abs(value)
      if (max_abs_value == 0) {
        y[dst_index] = 0.0;
      } else {
        for (int src_index = src_begin_index; src_index < src_end_index;
             src_index ++) {
          Real x_scaled = x[src_index] / max_abs_value;
          tmp += pow(std::abs(x_scaled), Real(power));
        }
        y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value;
      }
    }
  }
}

// Group max: y[i][j] = max over the j'th group of group_size inputs,
// ignoring NaNs; -1e20 if the whole group is NaN.
template<typename Real>
__global__
static void _group_max(Real *y, const Real *x, MatrixDim d, int src_stride,
                       int group_size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j < d.rows && i < d.cols) {
    int dst_index = i + j * d.stride;
    int src_begin_index = i * group_size + j * src_stride;
    Real max_value = -1e20;
    int src_end_index = src_begin_index + group_size;
    for (int src_index = src_begin_index; src_index < src_end_index;
         src_index ++) {
      if (!isnan(x[src_index]) && x[src_index] > max_value)
        max_value = x[src_index];
    }
    y[dst_index] = max_value;
  }
}

/*
 * cu::
 */
// Logistic sigmoid: y = 1 / (1 + exp(-x)).
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride, src_index = i + j*src_stride;
  if(i < d.cols && j < d.rows) {
    Real res = 1.0 / (1.0 + exp(-x[src_index]));
    y[dst_index] = res;
  }
}

// Backprop through sigmoid: eout = y*(1-y) .* e, where y is the sigmoid
// output.
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d,
                          int e_stride, int y_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride;
  int e_index   = i + j*e_stride;
  int y_index   = i + j*y_stride;
  if (i < d.cols && j < d.rows )
    eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index];
}

// tanh via (exp(2x)-1)/(exp(2x)+1), saturating to 1 when exp(2x) overflows.
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i +
j*d.stride, src_index = i + j * src_stride;
  if(i < d.cols && j < d.rows) {
    Real exp_2x = exp(2.0*x[src_index]);
    Real res;
    if(isinf(exp_2x)) {
      res = 1.0;
    } else {
      res = (exp_2x - 1.0) / (exp_2x + 1.0);
    }
    y[dst_index] = res;
  }
}

// Backprop through tanh: eout = (1 - y^2) .* e, where y is the tanh output.
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d,
                       int e_stride, int y_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride;
  int e_index   = i + j*e_stride;
  int y_index   = i + j*y_stride;
  if (i < d.cols && j < d.rows )
    eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index];
}

// "Square-root ReLU" forward pass (in place): mat <- sqrt(mat) where
// mat > 0, else floor_val.  (Despite the name, this applies sqrt, not
// squaring -- the name comes from the containing unit.)
template<typename Real>
__global__
static void _square_relu(Real* mat, Real floor_val, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  int index = i + j * d.stride;
  if (i < d.cols && j < d.rows) {
    if (mat[index] <= 0) {
      mat[index] = floor_val;
    } else {
      mat[index] = sqrt(mat[index]);
    }
  }
}

// In-place derivative of the sqrt nonlinearity applied by _square_relu:
// d/dx sqrt(x) = 1 / (2 * sqrt(x)) for x > 0, else 0.
// Bug fix: the previous expression "1.0/2*sqrt(mat[index])" parses as
// (1.0/2) * sqrt(x) == 0.5*sqrt(x) under C precedence, which is not the
// derivative; parenthesized so the division is by 2*sqrt(x).
// NOTE(review): assumes mat holds the pre-activation values x -- confirm
// against the caller.
template<typename Real>
__global__
static void _diff_square_relu(Real* mat, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  int index = i + j * d.stride;
  if (i < d.cols && j < d.rows)
    mat[index] = (mat[index] > 0.0 ? 1.0 / (2.0 * sqrt(mat[index])) : 0.0);
}

// Parametric ReLU forward: y = a[col]*x if x > 0, else b[col]*x, with
// per-column slope vectors a and b.
template<typename Real>
__global__
static void _parametric_relu(Real*y, const Real*x, MatrixDim d,
                             int src_stride, const Real*a, const Real*b) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride,
      src_index = i + j * src_stride;
  if(i < d.cols && j < d.rows) {
    Real res = (x[src_index] > 0.0) ?
               a[i] * x[src_index] : b[i] * x[src_index];
    y[dst_index] = res;
  }
}

// Backprop through parametric ReLU: eout = (y > 0 ? a[col] : b[col]) .* e.
template<typename Real>
__global__
static void _diff_parametric_relu(Real*eout, const Real*e, const Real*y,
                                  MatrixDim d, int e_stride, int y_stride,
                                  const Real*a, const Real*b) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride;
  int e_index   = i + j*e_stride;
  int y_index   = i + j*y_stride;
  if (i < d.cols && j < d.rows )
    eout[dst_index] = (y[y_index] > 0.0 ?
                       a[i] * e[e_index] : b[i] * e[e_index]);
}

// Heaviside step function: y = (x > 0 ? 1 : 0).
template<typename Real>
__global__
static void _heaviside(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride, src_index = i + j*src_stride;
  if(i < d.cols && j < d.rows) {
    Real res = (x[src_index] > 0.0 ? 1.0 : 0.0);
    y[dst_index] = res;
  }
}

// Row-wise softmax: one block per row (j = blockIdx.x), THREADS = blockDim.x
// threads cooperating via the shared array aux.  Three phases, each a
// strided pass plus a tree reduction: row max, exp-and-sum, normalize.
template<typename Real>
__global__
static void _softmax_reduce(Real*y, const Real*x, MatrixDim d,
                            int src_stride) {
  int j = blockIdx.x;
  int THREADS = blockDim.x;
  if (j >= d.rows) return;

  __shared__ Real aux[CU1DBLOCK];
  int steps = (d.cols - 1) / THREADS + 1;

  //copy input to aux
  aux[threadIdx.x] = x[threadIdx.x+j*d.stride];
  for(int i=1; i<steps; ++i) {
    if(threadIdx.x+i*THREADS < d.cols
       && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride])
      aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride];
  }

  //get the maximum value
  int nTotalThreads = THREADS;
  __syncthreads();
  while(nTotalThreads > 1) {
    int halfPoint = ((1+nTotalThreads) >> 1);   // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint)  {
      // Get the shared value stored by another thread
      if(threadIdx.x+halfPoint < nTotalThreads
         && aux[threadIdx.x] < aux[threadIdx.x+halfPoint])
        aux[threadIdx.x] = aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1);   // divide by two.
  }
  Real max = aux[0];
  __syncthreads();

  // subtract max, apply exp, sum up...
  y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max);
  aux[threadIdx.x] = y[threadIdx.x+j*d.stride];
  for(int i=1; i<steps; i++) {
    if(threadIdx.x+i*THREADS < d.cols) {
      y[threadIdx.x+i*THREADS+j*d.stride] =
          exp(x[threadIdx.x+i*THREADS+j*d.stride] - max);
      aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride];
    }
  }
  nTotalThreads = THREADS;
  __syncthreads();
  while(nTotalThreads > 1) {
    int halfPoint = ((1+nTotalThreads) >> 1);   // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint)  {
      // Get the shared value stored by another thread
      if(threadIdx.x+halfPoint < nTotalThreads)
        aux[threadIdx.x] += aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1);   // divide by two.
  }
  Real sum = aux[0];
  __syncthreads();

  //normalize by sum...
  for(int i=0; i<steps; i++) {
    if(threadIdx.x+i*THREADS < d.cols) {
      y[threadIdx.x+i*THREADS+j*d.stride] =
          y[threadIdx.x+i*THREADS+j*d.stride] / sum;
    }
  }
}

// Row-wise log-softmax; same one-block-per-row cooperative structure as
// _softmax_reduce above.
template<typename Real>
__global__
static void _log_softmax_reduce(Real *y, const Real *x, MatrixDim d,
                                int src_stride) {
  int j = blockIdx.x;
  int THREADS = blockDim.x;
  if (j >= d.rows) return;

  __shared__ Real aux[CU1DBLOCK];
  int steps = (d.cols - 1) / THREADS + 1;

  // Maximum step 1: loads input data to <aux>. If <d.cols> is larger than
  //                 <blockDim.x>, then we do a first pass filtering and only
  //                 keep a <blockDim.x> size array.
  aux[threadIdx.x] = x[threadIdx.x + j * d.stride];
  for (int i = 1; i < steps; ++i) {
    if (threadIdx.x + i * THREADS < d.cols
        && aux[threadIdx.x] < x[threadIdx.x + i * THREADS + j * d.stride])
      aux[threadIdx.x] = x[threadIdx.x + i * THREADS + j * d.stride];
  }

  // Maximum step 2: the standard max reduce.
int nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x + halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real max = aux[0]; __syncthreads(); // Log sum step 1: substracts max, and takes exponentials. y[threadIdx.x + j * d.stride] = x[threadIdx.x + j * d.stride] - max; aux[threadIdx.x] = exp(y[threadIdx.x + j * d.stride]); for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] = x[threadIdx.x + i * THREADS + j * d.stride] - max; aux[threadIdx.x] += exp(y[threadIdx.x + i * THREADS + j * d.stride]); } } // Log sum step 2: comptes summation and then takes logarithm. nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real log_sum = log(aux[0]); __syncthreads(); // Computes log softmax. 
for (int i = 0; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] -= log_sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i+1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; 
int32_cuda index = ((i+1) * (i+2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // if (i < dim.cols && j < dim.rows) { int dst_index = i + j * dim.stride, src_index; if (j <= i) { // no transpose src_index = (i * (i+1) / 2) + j; } else { // transpose. src_index = (j * (j+1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! 
Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real value[CU1DBLOCK]; __shared__ int32_cuda index[CU1DBLOCK]; //copy to shared memory value[threadIdx.x] = mat[i+j*d.stride]; index[threadIdx.x] = threadIdx.x; __syncthreads(); //get the id of the max value int32_cuda out_max = _max_id_reduce(value, index); __syncthreads(); //see if it's bigger value if(threadIdx.x == 0) { if(vec_val[j] <= mat[out_max+j*d.stride]) { vec_val[j] = mat[out_max+j*d.stride]; vec_id[j] = voff+out_max; } } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(i>0) return; if(j<d.rows) { int32_cuda index = vec_tgt[j] + j*d.stride; Real value = mat_net_out[index]; if(value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void 
cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_transpose_matrix(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _transpose_matrix), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_diff_square_relu(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { 
hipLaunchKernelGGL(( _diff_square_relu), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst, const float* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim); } void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const* dst, const float* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim); } void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim); } void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const* dst, const float* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, 
floor_val, d); } void cudaF_square_relu(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { hipLaunchKernelGGL(( _square_relu), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, 
MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size, float power) { hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power); } void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src, int32_cuda num_row_blocks, int32_cuda 
num_col_blocks, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... 
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
  hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}

/*
 * CuVector
 */
void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) {
  hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed);
}
void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) {
  hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim);
}
// df/fd suffixes denote the destination/source precisions (double/float).
void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) {
  hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim);
}
void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) {
  hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim);
}
void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) {
  hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim);
}
// min/max/trace wrappers use a fixed launch configuration (single or few
// blocks of CU1DBLOCK threads) matching the reduction kernels' expectations.
void cudaF_vec_min(const float* v, float* value, int dim) {
  hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim);
}
void cudaF_vec_max(const float* v, float* value, int dim) {
  hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim);
}
void cudaF_trace_mat_mat_trans(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) {
  hipLaunchKernelGGL(( _trace_mat_mat_trans<float,4>) , dim3(4),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value);
}
void cudaF_trace_mat_mat(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) {
  hipLaunchKernelGGL(( _trace_mat_mat<float,2>) , dim3(2),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value);
}
void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M,
                            int M_cols, int M_row_stride, int M_col_stride, const float *N,
                            int N_row_stride, int N_col_stride, int threads_per_element,
                            float beta) {
  hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta);
}
void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) {
  hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim);
}
void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) {
  hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc);
}
void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) {
  hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v, pvec_sum, dim, size);
}
void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int num_elements) {
  hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements);
}
void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, float alpha, const Int32Pair* indices, const float* x, int s, float* data) {
  hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data);
}
void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) {
  hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t);
}
void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) {
  hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t);
}
void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) {
  hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim);
}
void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) {
  hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim);
}
void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val, float *count, int dim) {
  hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v, ceiling_val,count,dim);
}
void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) {
  hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim);
}
void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) {
  hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
  hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
// Dispatches to the transposed block-matrix kernel when B_trans is nonzero.
void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata,
                            int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride,
                            const CuBlockMatrixData *B_cu_data, int B_num_blocks,
                            float alpha, float beta, int B_trans) {
  if (B_trans) {
    hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
  } else {
    hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
  }
}
void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks,
                             const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride,
                             const float *D_data, int D_row_stride, int D_col_stride,
                             float alpha, float beta) {
  hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta);
}

/*
 * cu::
 */
void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) {
  hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power);
}
void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) {
  hipLaunchKernelGGL(( _group_max), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size);
}
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) {
  hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) {
  hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
// NOTE(review): the commented-out markers below look like leftover git
// merge-conflict markers that were defused by commenting; consider removing.
//<<<<<<< HEAD
void cudaF_parametric_relu (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride, const float* a, const float* b) {
  hipLaunchKernelGGL(( _parametric_relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, a, b);
}
void cudaF_diff_parametric_relu (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride, const float* a, const float* b) {
  hipLaunchKernelGGL(( _diff_parametric_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride, a, b);
}
//=======
void cudaF_heaviside (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _heaviside), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
//>>>>>>> master
void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_log_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaF_one(int Gr, int Bl, float* x, int dim) {
  hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim);
}
void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) {
  hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) {
  hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) {
  hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) {
  hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x, y, dim);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) {
  hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
  hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) {
  hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) {
  hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) {
  hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) {
  hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) {
  hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices);
}
void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indexes) {
  hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes);
}
void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) {
  hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output);
}
void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) {
  hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride);
}

/*
 * "double"
 */

/*
 * CuMatrix
 */
// Double-precision wrappers: identical forwarding structure to the cudaF_*
// wrappers above, instantiating the same templated kernels for double.
void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); }
void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); }
void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim,
                            const double *vec, const double *mat2, int mat2_row_stride,
                            int mat2_col_stride, double beta) {
  hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta);
}
void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) {
  hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) {
  hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) {
  hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) {
  hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaD_transpose_matrix(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  hipLaunchKernelGGL(( _transpose_matrix), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d);
}
void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d);
}
void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_diff_square_relu(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  hipLaunchKernelGGL(( _diff_square_relu), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst, const double* const* src, MatrixDim dst_dim) {
  hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim);
}
void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const* dst, const double* src, MatrixDim src_dim) {
  hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim);
}
void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* const* src, MatrixDim dst_dim) {
  hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim);
}
void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* const* dst, const double* src, MatrixDim src_dim) {
  hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d);
}
void cudaD_square_relu(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) {
  hipLaunchKernelGGL(( _square_relu), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d);
}
void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d);
}
void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) {
  hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
  hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
  hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
  hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
  hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
  hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
  hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) {
  hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) {
  hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) {
  hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
  hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
  hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) {
  hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size);
}
void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size, double power) {
  hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power);
}
void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size) {
  hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
  hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
// (continues past this chunk)
void cudaD_add_mat(dim3 Gr, dim3 Bl,
double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, 
mat2_col_stride, vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaD_vec_min(const double* v, double* value, int dim) { hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaD_vec_max(const double* v, double* value, int dim) { hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaD_trace_mat_mat_trans(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans<double,4>) , 
dim3(4),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat<double,2>) , dim3(2),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc); } void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) { hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v,pvec_sum,dim,size); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int num_elements) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements); } void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, double alpha, const Int32Pair* indices, 
const double* x, int s, double* data) { hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v,ceiling_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, 
int D_row_stride, int D_col_stride, double alpha, double beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _group_max), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } //<<<<<<< HEAD void cudaD_parametric_relu (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, const double* a, const double* b) { hipLaunchKernelGGL(( _parametric_relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, a, b); } void cudaD_diff_parametric_relu (dim3 Gr, 
dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride, const double* a, const double* b) { hipLaunchKernelGGL(( _diff_parametric_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride, a, b); } //======= void cudaD_heaviside (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _heaviside), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } //>>>>>>> master void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_log_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, 
y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices); } void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indexes) { hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int 
mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* Some conversion kernels for which it's more convenient to not name them F or D. */ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void 
cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void 
cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); }
6720309f41b4e03057bc15e0d5f41be27e67aa1a.cu
// cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013-2015 Guoguo Chen // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cudamatrix/cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. 
} // the result return buffer[0]; } template<typename Real> __device__ static Real _min_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (min) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp < buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two } // the result return buffer[0]; } template<typename Real> __device__ static Real _max_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (max) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (get index of maximum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. 
if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread Real temp = -1e20; if(threadIdx.x+halfPoint < nTotalThreads) { temp = val[idx[threadIdx.x + halfPoint]]; } if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return idx[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. 
// Does mat = alpha * diag(vec) * mat2 + beta * mat, i.e. row j of mat2 is
// scaled by vec[j].  2D launch: x indexes columns, y indexes rows; mat2 may
// be transposed via its (row_stride, col_stride) arguments.
template<typename Real>
__global__
static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim,
                              const Real *vec, const Real *mat2,
                              int mat2_row_stride, int mat2_col_stride,
                              Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  int index = j * mat_dim.stride + i,
      index2 = j * mat2_row_stride + i * mat2_col_stride;
  if (i < mat_dim.cols && j < mat_dim.rows) {
    mat[index] = alpha * vec[j] * mat2[index2] + beta * mat[index];
  }
}

// Copies a packed lower-triangular matrix B (row-major packed storage,
// element (j,i) at offset j*(j+1)/2 + i) into dense matrix A, writing zeros
// above the diagonal.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  if (i < dmat.cols && j < dmat.rows) {
    int32_cuda index_B = (j * (j+1) / 2) + i;
    int32_cuda index_A = j * dmat.stride + i;
    if (i <= j) {
      A[index_A] = B[index_B];
    } else {
      A[index_A] = 0.0;
    }
  }
}

// As _copy_from_tp, but copies the transpose of the packed matrix into A
// (so the result is upper-triangular).
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) {
  // we interpret these indexes oppositely from normal, but it doesn't
  // matter as it's invoked in a symmetric way.
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  // transpose the indices used to index the source TpMatrix.
  if (i < dmat.rows && j < dmat.cols) {
    int32_cuda index_B = (j * (j+1) / 2) + i;
    int32_cuda index_A = i * dmat.stride + j;
    if (i <= j) {
      A[index_A] = B[index_B];
    } else {
      A[index_A] = 0.0;
    }
  }
}

// Element-wise copy with type conversion (e.g. float <-> double); source and
// destination may have different strides but are assumed to have the same
// rows/cols (bounds are checked against d_out only).
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in,
                           MatrixDim d_out, MatrixDim d_in) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index.
  int32_cuda index_out = i + j * d_out.stride;
  int32_cuda index_in = i + j * d_in.stride;
  if (i < d_out.cols && j < d_out.rows)
    mat_out[index_out] = static_cast<Real>(mat_in[index_in]);
}

// As _copy_from_mat, but reads the transpose of mat_in.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in,
                                 MatrixDim d_out, MatrixDim d_in) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index out
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index out
  int32_cuda index_out = i + j * d_out.stride;
  int32_cuda index_in = j + i * d_in.stride;
  if (j < d_out.rows && i < d_out.cols)
    mat_out[index_out] = static_cast<Real>(mat_in[index_in]);
}

// Scatters the d_in sparse elements (row, column, weight triples) into dense
// mat_out; one thread per sparse element.
// NOTE(review): no atomics are used, so this presumably assumes the sparse
// coordinates are unique -- confirm at call sites.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_smat(Real* mat_out,
                            const MatrixElement<OtherReal>* smat_in,
                            MatrixDim d_out, MatrixIndexT_cuda d_in) {
  int smat_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (smat_index >= d_in) return;
  int data_index = smat_in[smat_index].row * d_out.stride +
      smat_in[smat_index].column;
  mat_out[data_index] = smat_in[smat_index].weight;
}

// As _copy_from_smat, but scatters the transpose (row/column swapped).
template<typename Real, typename OtherReal>
__global__
static void _copy_from_smat_trans(Real* mat_out,
                                  const MatrixElement<OtherReal>* smat_in,
                                  MatrixDim d_out, MatrixIndexT_cuda d_in) {
  int smat_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (smat_index >= d_in) return;
  int data_index = smat_in[smat_index].column * d_out.stride +
      smat_in[smat_index].row;
  mat_out[data_index] = smat_in[smat_index].weight;
}

// For each sparse element (r,c,w) of smat, writes mat(r,c) * w into
// trace_vec_out; summing that vector elsewhere yields trace(mat * smat^T).
template<typename Real>
__global__
static void _trace_mat_smat_trans(const Real* mat_in,
                                  const MatrixElement<Real>* smat_in,
                                  MatrixDim mat_d_in,
                                  MatrixIndexT_cuda smat_d_in,
                                  Real* trace_vec_out) {
  int smat_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (smat_index >= smat_d_in) return;
  int mat_index = smat_in[smat_index].row * mat_d_in.stride +
      smat_in[smat_index].column;
  trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight;
}

// As _trace_mat_smat_trans, but indexes mat at (c,r): partial products for
// trace(mat * smat).
template<typename Real>
__global__
static void _trace_mat_smat(const Real* mat_in,
                            const MatrixElement<Real>* smat_in,
                            MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in,
                            Real* trace_vec_out) {
  int smat_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (smat_index >= smat_d_in) return;
  int mat_index = smat_in[smat_index].column * mat_d_in.stride +
      smat_in[smat_index].row;
  trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight;
}

template<typename Real>
__global__
static void _transpose_matrix(Real* mat, MatrixDim d) {
  // Transposes a square matrix in-place.  Each below-diagonal thread swaps
  // its element with the mirrored one; above-diagonal threads exit.
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index
  if (j >= i || i >= d.rows) { return; } // Only half the threads act.
  int32_cuda index_a = j + i * d.stride,
      index_b = i + j * d.stride;
  Real a = mat[index_a], b = mat[index_b];
  mat[index_a] = b;
  mat[index_b] = a;
}

// Element-wise exponential, in place.
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j * d.stride;
  if (i < d.cols && j < d.rows) {
    mat[index] = exp(mat[index]);
  }
}

// Scales the diagonal of a packed (triangular/symmetric-packed) matrix;
// element i of the diagonal lives at packed offset (i+1)(i+2)/2 - 1.
template<typename Real>
__global__
static void _scale_diag_packed(Real* mat, Real value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda index = ((i+1)*(i+2)/2) - 1;
  if ( i < dim ) {
    mat[index] = value * mat[index];
  }
}

// Sets the diagonal of a dense matrix to a constant.
template<typename Real>
__global__
static void _set_diag(Real* mat, Real value, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda index = i + i*d.stride;
  if ( i < d.rows && i < d.cols) {
    mat[index] = value;
  }
}

// Sets the diagonal of a packed matrix to a constant.
template<typename Real>
__global__
static void _set_diag_packed(Real* mat, Real value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda index = ((i+1)*(i+2)/2) - 1;
  if ( i < dim ) {
    mat[index] = value;
  }
}

// Adds a constant to the diagonal of a packed matrix.
template<typename Real>
__global__
static void _add_diag_packed(Real* mat, Real value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda index = ((i+1)*(i+2)/2) - 1;
  if ( i < dim ) {
    mat[index] = mat[index] + value;
  }
}

// Fills the matrix with a constant value.
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row
  int32_cuda index = i + j * d.stride;
  if (i < d.cols && j < d.rows)
    mat[index] = value;
}

// Zeros strictly-above-diagonal elements (column index < row index here,
// since the guard is j < i with i the column).
template<typename Real>
__global__
static void _set_zero_above_diag(Real* mat, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j * d.stride;
  if (i < d.cols && j < i)
    mat[index] = 0.0;
}

// Adds a scalar to every element.
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride;
  if (i < d.cols && j < d.rows)
    mat[index] = mat[index] + value;
}

// Multiplies every element by a scalar.
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride;
  if (i < d.cols && j < d.rows)
    mat[index] = mat[index] * value;
}

// Element-wise natural log, in place (no guard against non-positive input).
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride;
  if (i < d.cols && j < d.rows)
    mat[index] = log(mat[index]);
}

// Element-wise mat *= A (A may have a different stride).
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d,
                          int src_stride) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride;
  if (i < dst_d.cols && j < dst_d.rows)
    mat[dst_index] = mat[dst_index] * A[src_index];
}

// Element-wise mat /= A.
template<typename Real>
__global__
static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d,
                          int src_stride) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride;
  if (i < dst_d.cols && j < dst_d.rows)
    mat[dst_index] = mat[dst_index] / A[src_index];
}

// Element-wise mat = max(mat, A).
template<typename Real>
__global__
static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride;
  if ( i < dst_d.cols && j < dst_d.rows ) {
    Real a = mat[dst_index], b = A[src_index];
    mat[dst_index] = (a > b ? a : b);
  }
}

// Element-wise vector product v *= a.
template<typename Real>
__global__
static void _vec_mul_elements(Real* v, const Real* a, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < dim)
    v[i] = v[i] * a[i];
}

// Scales column i of mat by scale[i].
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride;
  if (i < d.cols && j < d.rows)
    mat[index] *= scale[i];
}

// Scales row j of mat by scale[j].
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride;
  if (i < d.cols && j < d.rows)
    mat[index] *= scale[j];
}

// Multiplies each element of y by the x-element of the group it belongs to:
// column i of y maps to column i / group_size of x.
template<typename Real>
__global__
static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d,
                                int src_stride, int group_size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j < d.rows && i < d.cols ) {
    int dst_index = i + j * d.stride;
    int src_index = i / group_size + j * src_stride;
    y[dst_index] *= x[src_index];
  }
}

/// y is the derivative we will output; vec is the input we're computing
/// the group p-norm on, "norm" is the previously computed group p-norm.
/// Derivative of the group p-norm w.r.t. each input element:
/// sign(v) * |v|^(p-1) * norm^(1-p), or 0 where the norm is non-positive.
template<typename Real>
__global__
static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm,
                              MatrixDim d, int src_stride, int group_size,
                              Real power) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j < d.rows && i < d.cols ) {
    int dst_index = i + j * d.stride,
        src_index = i / group_size + j * src_stride;
    Real vec_element = vec[dst_index], // this is the element of the original vector.
        norm_element = norm[src_index]; // this is the pnorm
    Real vec_element_sign = (vec_element > 0 ? 1 : -1);
    Real ans;
    if (norm_element <= 0.0)
      ans = 0.0; // The derivative is either zero or undefined at the origin.
    else
      ans = vec_element_sign * pow(std::abs(vec_element), power - 1) *
          pow(norm_element, 1 - power);
    deriv[dst_index] = ans;
  }
}

/// deriv is the derivative we will output; vec is the input we're computing
/// the group max on, "maxv" is the previously computed group max.
/// Outputs 1.0 where the element equals its group max, else 0.0.
template<typename Real>
__global__
static void _calc_group_max_deriv(Real *deriv, const Real *vec,
                                  const Real *maxv, MatrixDim d,
                                  int src_stride, int group_size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j < d.rows && i < d.cols ) {
    int dst_index = i + j * d.stride,
        src_index = i / group_size + j * src_stride;
    Real vec_element = vec[dst_index], // this is the element of the original vector.
        max_element = maxv[src_index]; // this is the max value
    Real ans = (max_element == vec_element ? 1.0 : 0.0);
    deriv[dst_index] = ans;
  }
}

/// Set each element to y = (x == orig ? changed : x).
// Replaces every occurrence of 'orig' in the vector with 'changed'.
template<typename Real>
__global__
static void _replace_value(Real *vec, int dim, Real orig, Real changed) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < dim)
    if (vec[i] == orig) vec[i] = changed;
}

// Divides each row j of mat by vec_div[j], via a shared-memory cache of the
// reciprocals (one per block row).
// NOTE(review): the shared array has 16 slots, which implies blockDim.y <= 16
// -- confirm at the launch site.  Also note the early `return` for j >= d.rows
// happens before __syncthreads(); that is only safe if whole blocks fall
// outside the row range together -- verify the grid is sized accordingly.
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride;

  if (j >= d.rows ) return;

  //invert divider in shared memory
  __shared__ Real inv[16];
  if(threadIdx.x==0) {
    inv[threadIdx.y] = 1.0/vec_div[j];
  }
  __syncthreads();

  //multiply elements
  if (i < d.cols && j < d.rows)
    mat[index] *= inv[threadIdx.y];
}

// dst += alpha * src (element-wise, with independent strides).
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d,
                     int src_stride) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
  int32_cuda index = i + j * d.stride;
  int32_cuda index_src = i + j * src_stride;
  if (i < d.cols && j < d.rows)
    dst[index] = alpha * src[index_src] + dst[index];
}

// dst += alpha * src^T.
template<typename Real>
__global__
static void _add_mat_trans(Real alpha, const Real* src, Real* dst,
                           MatrixDim d, int src_stride) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j *d.stride;
  int32_cuda index_src = j + i*src_stride;
  if (i < d.cols && j < d.rows)
    dst[index] = alpha*src[index_src] + dst[index];
}

// dst += alpha * sum of all (num_row_blocks x num_col_blocks) sub-blocks of
// src, where each sub-block has the dimensions of dst.
template<typename Real>
__global__
static void _add_mat_blocks(Real alpha, const Real* src,
                            int32_cuda num_row_blocks,
                            int32_cuda num_col_blocks, Real* dst, MatrixDim d,
                            int src_stride) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j * d.stride;
  int32_cuda index_src = i + j * src_stride;
  if (i < d.cols && j < d.rows)
    for (int32_cuda p = 0; p < num_row_blocks; p++) {
      for (int32_cuda q = 0; q < num_col_blocks; q++) {
        dst[index] = alpha * src[index_src + p * src_stride * d.rows
                                 + q * d.cols] + dst[index];
      }
    }
}

// As _add_mat_blocks, but sums transposed sub-blocks of src.
template<typename Real>
__global__
static void _add_mat_blocks_trans(Real alpha, const Real* src,
                                  int32_cuda num_row_blocks,
                                  int32_cuda num_col_blocks, Real* dst,
                                  MatrixDim d, int src_stride) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j * d.stride;
  int32_cuda index_src = j + i * src_stride;
  if (i < d.cols && j < d.rows)
    for (int32_cuda p = 0; p < num_row_blocks; p++) {
      for (int32_cuda q = 0; q < num_col_blocks; q++) {
        dst[index] = alpha * src[index_src + p * src_stride * d.cols
                                 + q * d.rows] + dst[index];
      }
    }
}

// dst = A * B / C element-wise, except dst = A where C is zero (avoids
// division by zero).
template<typename Real>
__global__
static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C,
                                 Real* dst, MatrixDim d, int stride_a,
                                 int stride_b, int stride_c) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride,
      a_index = i + j*stride_a,
      b_index = i + j*stride_b,
      c_index = i + j*stride_c;
  if (i < d.cols && j < d.rows)
    if (C[c_index] == 0)
      dst[index] = A[a_index];
    else
      dst[index] = A[a_index] * B[b_index] / C[c_index];
}

// Given a matrix input S (not packed!) and a lower-triangular matrix L,
// this function does S = beta S + alpha * L^T L.  This is used in PSD matrix inversion.
// The i index is the row of the destination S and the j the column (although of
// course the output is symmetric so it doesn't matter in a sense).  The main point
// of this is to make use of various symmetries and zero-ness.
// S = beta * S + alpha * T^T T, for lower-triangular T; writes both symmetric
// halves of S.  Only threads with j <= i (lower triangle) do work.
template<typename Real>
__global__
static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim,
                        Real *S, MatrixDim sdim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i >= sdim.rows || j > i) return;

  // this thread computes the dot-product of the i'th column of
  // L with the j'th column of L.  The values we're multiplying
  // are only nonzero for row-index k greater or equal to
  // max(i, j), which equals i.
  Real sum = 0.0;
  for (int k = i; k < sdim.rows; k++) {
    int i_index = i + tdim.stride * k,
        j_index = j + tdim.stride * k;
    sum += T[i_index] * T[j_index];
  }
  int output_index1 = i * sdim.stride + j,
      output_index2 = j * sdim.stride + i;
  S[output_index1] = alpha * sum + beta * S[output_index1];
  S[output_index2] = alpha * sum + beta * S[output_index2];
}

// dst = alpha * col (broadcast down each column) + beta * dst.
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta,
                             Real* dst, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride;
  if (i < d.cols && j < d.rows)
    dst[index] = alpha*col[j] + beta*dst[index];
}

// dst = alpha * row (broadcast across each row) + beta * dst.
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta,
                             Real* dst, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride;
  if (i < d.cols && j < d.rows)
    dst[index] = alpha*row[i] + beta*dst[index];
}

// Zeros each element of mat whose corresponding mask element is 0.
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat,
                        MatrixDim dmask) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*dmat.stride;
  int32_cuda index2 = i + j*dmask.stride;
  if ( i < dmat.cols && j < dmat.rows )
    if(mask[index2] == 0) mat[index] = 0;
}

// mat = alpha * mat2 * diag(vec) + beta * mat (column i scaled by vec[i]);
// mat2 may be transposed via its (row_stride, col_stride) arguments.
template<typename Real>
__global__
static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim,
                              const Real *mat2, int mat2_row_stride,
                              int mat2_col_stride, const Real *vec,
                              Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index

  int index = i + j * mat_dim.stride,
      index2 = i * mat2_col_stride + j * mat2_row_stride;

  if (j < mat_dim.rows && i < mat_dim.cols)
    mat[index] = alpha * mat2[index2] * vec[i] + beta * mat[index];
}

// data = alpha * srcA .* srcB + beta * data (element-wise product).
template<typename Real>
__global__
static void _add_mat_mat_elements(Real *data, const Real *srcA_data,
                                  const Real *srcB_data, MatrixDim dim,
                                  int srcA_stride, int srcB_stride,
                                  Real alpha, Real beta) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda tgt_index = i + j*dim.stride;
  int32_cuda srcA_index = i + j*srcA_stride;
  int32_cuda srcB_index = i + j*srcB_stride;
  if (i < dim.cols && j < dim.rows) {
    data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index]
        + beta * data[tgt_index] ;
  }
}

/*
 * CuVector
 */
// very limited application!
// Rescales v[i] based on the ratio a[i]/param_3 relative to target param_1,
// clamping the correction factor at param_2; sets *flag and aborts if any
// ratio is out of the expected [0, 1.01) range.
template<typename Real>
__global__
static void _set_bias_params(Real* v, const Real* a, Real param_1,
                             Real param_2, Real param_3, int* flag, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if ( i < dim ) {
    Real ratio = a[i] / param_3;
    if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) {
      *flag = 1;
      return;
    }
    if ( ratio < param_1 ) {
      Real factor = ((param_1/ratio) > param_2) ? param_2 : (param_1/ratio);
      v[i] = v[i] / factor;
    } else if ( ratio > param_1 ) {
      Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1);
      v[i] = v[i] * factor;
    }
  }
}

// Copies a vector into a double-precision vector.
template<typename Real>
__global__
static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  //  if (blockIdx.y > 0) return;
  if (i < dim) {
    v_out[i] = (double) v_in[i];
  }
}

// This kernel writes a copy of the vector "v_in" to each row of the matrix
// "m_out".
// the dimension of v_in should be equal to the #columns of m_out.
template<typename Real>
__global__
static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // column index.
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index.
  if (i < d.cols && j < d.rows) {
    int index = i + j * d.stride;
    m_out[index] = v_in[i];
  }
}

// Copies a vector into a single-precision vector.
template<typename Real>
__global__
static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  //  if (blockIdx.y > 0) return;
  if (i < dim) {
    v_out[i] = (float) v_in[i];
  }
}

// Computes the minimum over v[0..dim-1] into *value.  Each thread scans a
// contiguous chunk, then the block reduces via shared memory.
// NOTE(review): appears to assume a single block of CU1DBLOCK threads (all
// surviving threads write *value with the reduced result) -- confirm launch
// configuration at the call site.  The early return before __syncthreads()
// is only safe under that same assumption.
template<typename Real>
__global__
static void _vec_min(const Real* v, Real* value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if(i >= CU1DBLOCK) return;

  __shared__ Real row_data[CU1DBLOCK];

  int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK;

  Real min = 1.0 / 0.0; // infinity.

  for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) {
    Real v_j = v[j];
    if (v_j < min) min = v_j;
  }

  row_data[i] = min;

  __syncthreads();

  //get the sum
  *value = _min_reduce(row_data);
}

// Computes the maximum over v[0..dim-1] into *value; same structure and
// launch assumptions as _vec_min.
template<typename Real>
__global__
static void _vec_max(const Real* v, Real* value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if(blockIdx.y > 0) return;

  __shared__ Real row_data[CU1DBLOCK];

  if(i >= CU1DBLOCK) return;

  int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK;

  Real max = -1.0 / 0.0; // -infinity.

  for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) {
    Real v_j = v[j];
    if (v_j > max) max = v_j;
  }

  row_data[i] = max;

  __syncthreads();

  //get the sum
  *value = _max_reduce(row_data);
}

// _trace_mat_mat expects to be called with 1 blocks, each of dimension
// CU1DBLOCK.  Each block outputs a partial sum to value[blockIdx.x],
// i.e. value[0 through 0].
template<typename Real, int num_blocks> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; int block_size = (num_elements + num_threads - 1) / num_threads; int loop_start = i * block_size, loop_end = (i + 1) * block_size; if (loop_end > num_elements) loop_end = num_elements; Real sum = 0.0; for (int j = loop_start; j < loop_end; j++) { // for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = row + col * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // _trace_mat_mat_trans expects to be called with 4 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 3]. template<typename Real, int num_blocks> __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; // int block_size = (num_elements + num_threads - 1) / num_threads; // int loop_start = i * block_size, loop_end = (i + 1) * block_size; // if (loop_end > num_elements) // loop_end = num_elements; Real sum = 0.0; // for (int j = loop_start; j < loop_end; j++) { for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. 
int index_A = col + row * dA.stride, index_B = col + row * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // Adds diag(M N) to v, where M and N are matrices. We supply row_stride and // col_stride arguments for M and N, and swapping them allows us to transpose // those matrices. Note: we imagine row-major indexing here, just like Kaldi // and CBLAS (but unlike CUBLAS). // This kernel expects the blockDim to be (CU1DBLOCK, 1) and the // gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element. // threads_per_element should be a power of 2. template<typename Real> __global__ static void _add_diag_mat_mat( Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride, int M_col_stride, const Real *N, int N_row_stride, int N_col_stride, int threads_per_element, Real beta) { // we actually assume blockDim.x == CU1DBLOCK here. // Each diagonal element of v is processed by "threads_per_element" threads. __shared__ Real temp_data[CU1DBLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells // us which block of elements we sum up. if (v_idx < v_dim) { Real sum = 0.0; for (int j = sub_idx; j < M_cols; j += threads_per_element) { int M_index = v_idx * M_row_stride + j * M_col_stride, N_index = j * N_row_stride + v_idx * N_col_stride; sum += M[M_index] * N[N_index]; } temp_data[threadIdx.x] = sum; } // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data // that we want to sum up. // The following is a tree-based reduction of the elements of temp_data from // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx". 
__syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0 && v_idx < v_dim) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ 
Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j< loop_end; j++) { int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) ) + 2 * sizeof(int)); tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here.... } tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t+1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int num_elements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_elements) return; data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight; } template<typename Real> __global__ static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha, const Int32Pair* indices, const Real* x, int s, Real* data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= s) return; int data_i = indices[i].first * dim.stride + indices[i].second; data[data_i] += alpha * x[i]; } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + 
threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row int32_cuda index_mat1 = i + j*mat1_dim.stride; int32_cuda index_mat2 = i + j*mat2_stride; int32_cuda index_mask = i + j*mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0); } template<typename Real> __global__ static void _vec_sum(Real *v, Real *sum, int dim, int inc) { int i = threadIdx.x; __shared__ Real row_data[CU1DBLOCK]; if (i >= CU1DBLOCK) return; Real tmp_sum = 0; int size = dim / CU1DBLOCK; //the least size in a loop (later part) int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i+1) * size; } for(int j = loop_start; j< loop_end; j++) { tmp_sum += v[j * inc]; } row_data[threadIdx.x] = tmp_sum; __syncthreads(); *sum = _sum_reduce(row_data); } template<typename Real> __global__ static void _pvec_sum(Real* v, Real* g, int dim, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int start = size * i; int end = start + size; if (end > dim) end = dim; __shared__ Real row_data[CU1DBLOCK]; Real sum = 0; for (int j = start; j < end; j++) sum += v[j]; row_data[threadIdx.x] = sum; __syncthreads(); g[blockIdx.x] = _sum_reduce(row_data); } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = 
floor_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] > ceiling_val) { v[i] = ceiling_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (include_sign == true && mat[index] < 0) { if (power == 1.0) mat[index] = -std::abs(mat[index]); if (power == 2.0) { mat[index] = -mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = -sqrt(std::abs(mat[index])); } else { mat[index] = -pow(std::abs(mat[index]), power); } } else { if (power == 1.0) mat[index] = std::abs(mat[index]); if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = sqrt(std::abs(mat[index])); } else if (power < 0.0 && mat[index] == 0.0) { mat[index] = 0.0; } else { mat[index] = pow(std::abs(mat[index]), power); } } } } template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < 
d.rows) mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0); } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (mat[index] < floor_val) mat[index] = floor_val; } } template<typename Real> __global__ static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[i], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = j * src_stride + reorder[i]; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0.0; } } } template<typename Real> __global__ static void _add_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[i], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = j * src_stride + index; Real val = src[src_index]; dst[dst_index] += val; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[j], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = reorder[j] * src_stride + i; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void 
_copy_rows(Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; const Real *pointer = src[j]; if (pointer != NULL) { dst[dst_index] = pointer[i]; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_to_rows(Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < src_dim.cols && j < src_dim.rows) { Real *pointer = dst[j]; if (pointer != NULL) { pointer[i] = src[j * src_dim.stride + i]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; if (reorder[j] >= 0) { int src_index = reorder[j] * src_stride + i; dst[dst_index] += alpha * src[src_index]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; if (src[j] != NULL) { dst[dst_index] += alpha * src[j][i]; } } } template<typename Real> __global__ static void _add_to_rows(Real alpha, Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < src_dim.cols && j < src_dim.rows) { if (dst[j] != NULL) { dst[j][i] += alpha * src[j * 
src_dim.stride + i];
    }
  }
}

// Ceilings each element: mat[row][col] = min(mat[row][col], ceiling_val).
template<typename Real>
__global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int index = col + row * d.stride;
  if (col < d.cols && row < d.rows) {
    if (mat[index] > ceiling_val) mat[index] = ceiling_val;
  }
}

// Element-wise reciprocal: data[row][col] = 1.0 / data[row][col].
// No zero guard; a zero element yields inf, mirroring the CPU path.
template<typename Real>
__global__ static void _invert_elements(Real* data, MatrixDim d) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int index = col + row * d.stride;
  if (col < d.cols && row < d.rows)
    data[index] = 1.0 / data[index];
}

// Matrix-wise, do data = beta * data + alpha * A * B^T, where B is a block
// matrix.  (The original comment said "alpha * data + beta * A * B^T", but
// the update below is data = alpha * sum + beta * data, i.e. alpha scales
// the product and beta the existing contents.)
// Thread (x, y) handles (row i of A, block j of B) and writes the
// corresponding strip of 'data'.
template<typename Real>
__global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim,
                                               const Real *A_data,
                                               int A_num_rows, int A_num_cols,
                                               int A_row_stride, int A_col_stride,
                                               const CuBlockMatrixData *B_cu_data,
                                               int B_num_blocks,
                                               Real alpha, Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y;  // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks)
    return;
  const CuBlockMatrixData &cu_data = B_cu_data[j];

  // "BT" means B transposed: the block's row/col roles are swapped.
  int BT_row_start = cu_data.col_offset,
      BT_col_start = cu_data.row_offset,
      BT_num_rows = cu_data.matrix_dim.cols,
      BT_num_cols = cu_data.matrix_dim.rows,
      BT_col_stride = cu_data.matrix_dim.stride;
  // Cast from void*; we avoided a bunch of hassle by storing it that way
  // (relates to the Ansi-C requirement on the struct).
  const Real *B_data = static_cast<Real*>(cu_data.matrix_data);

  for (int k = 0; k < BT_num_cols; k++) {
    const Real *this_BT_col = B_data + k * BT_col_stride;
    // Points at A[i][BT_row_start]; just part of row i of A.
    const Real *this_A_row = A_data + i * A_row_stride
        + BT_row_start * A_col_stride;
    Real sum = 0.0;
    for (int l = 0; l < BT_num_rows; l++)  // l indexes rows of B.
      sum += this_BT_col[l] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + BT_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}

// As above but without the transpose:
// data = beta * data + alpha * A * B, B a block matrix.
template<typename Real>
__global__ static void _add_mat_blockmat(Real *data, MatrixDim dim,
                                         const Real *A_data,
                                         int A_num_rows, int A_num_cols,
                                         int A_row_stride, int A_col_stride,
                                         const CuBlockMatrixData *B_cu_data,
                                         int B_num_blocks,
                                         Real alpha, Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y;  // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks)
    return;
  const CuBlockMatrixData &block_data = B_cu_data[j];

  int B_row_start = block_data.row_offset,
      B_col_start = block_data.col_offset,
      B_num_rows = block_data.matrix_dim.rows,
      B_num_cols = block_data.matrix_dim.cols,
      B_row_stride = block_data.matrix_dim.stride;
  // Cast from void*; see note in _add_mat_blockmat_trans.
  const Real *B_data = static_cast<Real*>(block_data.matrix_data);

  for (int k = 0; k < B_num_cols; k++) {
    const Real *this_B_col = B_data + k;
    // Points at A[i][B_row_start]; just part of row i of A.
    const Real *this_A_row = A_data + i * A_row_stride
        + B_row_start * A_col_stride;
    Real sum = 0.0;
    for (int l = 0; l < B_num_rows; l++)  // l indexes rows of B.
      sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + B_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}

// For a block matrix B, does B = alpha * C * D + beta * B.
// The (x,y,z) indices are the block index, then the row
// and column indices within the block.  Note: transposition of C and D
// is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride),
// so it's invisible to this code.  The num-cols and num-rows of C and D
// are only provided to the extent that they are not already determined
// by other quantities.
// One thread per (block b, row i, col j) element of the block matrix B;
// computes B[b](i,j) = alpha * (C * D)(global row, global col) + beta * old.
template<typename Real>
__global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data,
                                          int num_blocks,
                                          const Real *C_data, int C_num_cols,
                                          int C_row_stride, int C_col_stride,
                                          const Real *D_data,
                                          int D_row_stride, int D_col_stride,
                                          Real alpha, Real beta) {
  int b = blockIdx.x * blockDim.x + threadIdx.x;  // block-index into B.
  int i = blockIdx.y * blockDim.y + threadIdx.y;  // row-index into b'th block
  int j = blockIdx.z * blockDim.z + threadIdx.z;  // col-index into b'th block
  if (b >= num_blocks)
    return;
  const CuBlockMatrixData &block_data = B_cu_data[b];
  if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols)
    return;  // outside the dimensions of the b'th block.

  // B_elem is the element of B we're writing to.
  Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data)
      + i * block_data.matrix_dim.stride + j;
  Real B_val = *B_elem;

  // (B_row, B_col) is the (row, col) index into the full matrix B.
  int B_row = block_data.row_offset + i,
      B_col = block_data.col_offset + j;
  const Real *C_row_data = C_data + C_row_stride * B_row,
      *D_col_data = D_data + D_col_stride * B_col;
  Real sum = 0.0;
  for (int k = 0; k < C_num_cols; k++) {
    sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride];
  }
  *B_elem = alpha * sum + beta * B_val;
}

// NOTE(review): this kernel is an exact duplicate of _add_mat_blockmat_trans
// above (same body, different name); kept because both names have callers.
// data = beta * data + alpha * A * B^T, with B a block matrix.
template<typename Real>
__global__ static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim,
                                                    const Real *A_data,
                                                    int A_num_rows,
                                                    int A_num_cols,
                                                    int A_row_stride,
                                                    int A_col_stride,
                                                    const CuBlockMatrixData *B_cu_data,
                                                    int B_num_blocks,
                                                    Real alpha, Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y;  // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks)
    return;
  const CuBlockMatrixData &cu_data = B_cu_data[j];

  // BT means B transposed.
  int BT_row_start = cu_data.col_offset,
      BT_col_start = cu_data.row_offset,
      BT_num_rows = cu_data.matrix_dim.cols,
      BT_num_cols = cu_data.matrix_dim.rows,
      BT_col_stride = cu_data.matrix_dim.stride;
  // Cast from void*; we avoided a bunch of hassle by doing this (relates
  // to the Ansi-C requirement).
  const Real *B_data = static_cast<Real*>(cu_data.matrix_data);

  for (int k = 0; k < BT_num_cols; k++) {
    const Real *this_BT_col = B_data + k * BT_col_stride;
    // this_A_row points to the element A[i][BT_row_start]; it's really just
    // part of this row of A.
    const Real *this_A_row = A_data + i * A_row_stride
        + BT_row_start * A_col_stride;
    Real sum = 0.0;
    for (int l = 0; l < BT_num_rows; l++)  // l indexes rows of B.
      sum += this_BT_col[l] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + BT_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}

// data[row][col] = sum of src_data[row][indices[col].first ..
// indices[col].second) -- a per-column contiguous range sum.
template<typename Real>
__global__ static void _sum_column_ranges(Real *data, MatrixDim dim,
                                          const Real *src_data,
                                          MatrixDim src_dim,
                                          const Int32Pair *indices) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= dim.rows || col >= dim.cols)
    return;
  int dst_index = row * dim.stride + col,
      src_start_index = row * src_dim.stride + indices[col].first,
      src_end_index = row * src_dim.stride + indices[col].second;
  Real sum = 0.0;
  for (int index = src_start_index; index < src_end_index; index++)
    sum += src_data[index];
  data[dst_index] = sum;
}

// data[row] += sum over src rows indexes[row].first .. indexes[row].second),
// column by column.
template<typename Real>
__global__ static void _add_row_ranges(Real *data, MatrixDim dim,
                                       const Real *src_data,
                                       MatrixDim src_dim,
                                       const Int32Pair *indexes) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= dim.rows || col >= dim.cols)
    return;
  int dst_index = row * dim.stride + col;
  int src_index_start = indexes[row].first,
      src_index_end = indexes[row].second;
  for (int row_index = src_index_start; row_index < src_index_end; row_index++)
    data[dst_index] += 
src_data[row_index * src_dim.stride + col]; }

// y = log(1 + exp(x)), the "softplus"/soft-hinge; for x >= 10 the function
// is numerically ~x, so we short-circuit to avoid overflow in exp().
template<typename Real>
__global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d,
                                   int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride, src_index = i + j*src_stride;
  if(i < d.cols && j < d.rows) {
    Real val = x[src_index], result;
    if (val >= 10.0) result = val;  // function approaches y=x as x gets large
    else result = log1p(exp(val));
    y[dst_index] = result;
  }
}

// p-norm over consecutive groups of 'group_size' inputs per output element.
// If the direct sum of |x|^p overflows/underflows to NaN, recompute in a
// scaled form: factor out max|x| so each term is <= 1 in magnitude.
template<typename Real>
__global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d,
                                    int src_stride, int group_size,
                                    Real power) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j < d.rows && i < d.cols) {
    int dst_index = i + j * d.stride;
    Real tmp = 0;
    int src_begin_index = i * group_size + j * src_stride;
    int src_end_index = src_begin_index + group_size;
    for (int src_index = src_begin_index; src_index < src_end_index;
         src_index ++) {
      tmp += pow(std::abs(x[src_index]), power);
    }
    tmp = pow(tmp, Real(1.0 / power));
    if (!isnan(tmp)) {
      y[dst_index] = tmp;
    } else {
      // Overflow/underflow path: rescale by the largest absolute value.
      Real max_value = x[src_begin_index], min_value = max_value;
      for (int src_index = src_begin_index + 1; src_index < src_end_index;
           src_index ++) {
        if (x[src_index] > max_value) max_value = x[src_index];
        if (x[src_index] < min_value) min_value = x[src_index];
      }
      tmp = 0.0;
      // max_abs_value is the largest abs(value) in the group.
      Real max_abs_value = (max_value > -min_value ? max_value : -min_value);
      if (max_abs_value == 0) {
        y[dst_index] = 0.0;
      } else {
        for (int src_index = src_begin_index; src_index < src_end_index;
             src_index ++) {
          Real x_scaled = x[src_index] / max_abs_value;
          tmp += pow(std::abs(x_scaled), Real(power));
        }
        y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value;
      }
    }
  }
}

// Max over consecutive groups of 'group_size' inputs; NaNs are skipped.
// NOTE(review): the -1e20 sentinel assumes all inputs exceed -1e20.
template<typename Real>
__global__ static void _group_max(Real *y, const Real *x, MatrixDim d,
                                  int src_stride, int group_size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j < d.rows && i < d.cols) {
    int dst_index = i + j * d.stride;
    int src_begin_index = i * group_size + j * src_stride;
    Real max_value = -1e20;
    int src_end_index = src_begin_index + group_size;
    for (int src_index = src_begin_index; src_index < src_end_index;
         src_index ++) {
      if (!isnan(x[src_index]) && x[src_index] > max_value)
        max_value = x[src_index];
    }
    y[dst_index] = max_value;
  }
}

/*
 * cu::
 */
// y = 1 / (1 + exp(-x)), element-wise logistic sigmoid.
template<typename Real>
__global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d,
                                int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride, src_index = i + j*src_stride;
  if(i < d.cols && j < d.rows) {
    Real res = 1.0 / (1.0 + exp(-x[src_index]));
    y[dst_index] = res;
  }
}

// Backprop through sigmoid: eout = y * (1 - y) * e, with y the forward
// output of the sigmoid.
template<typename Real>
__global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y,
                                     MatrixDim d, int e_stride, int y_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride;
  int e_index = i + j*e_stride;
  int y_index = i + j*y_stride;
  if (i < d.cols && j < d.rows )
    eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index];
}

// y = tanh(x) computed as (exp(2x)-1)/(exp(2x)+1); the isinf check avoids
// inf/inf = NaN for large x, where tanh saturates at 1.
template<typename Real>
__global__ static void _tanh(Real*y, const Real*x, MatrixDim d,
                             int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride, src_index = i + j * src_stride;
  if(i < d.cols && j < d.rows) {
    Real exp_2x = exp(2.0*x[src_index]);
    Real res;
    if(isinf(exp_2x)) {
      res = 1.0;
    } else {
      res = (exp_2x - 1.0) / (exp_2x + 1.0);
    }
    y[dst_index] = res;
  }
}

// Backprop through tanh: eout = (1 - y^2) * e.
template<typename Real>
__global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y,
                                  MatrixDim d, int e_stride, int y_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride;
  int e_index = i + j*e_stride;
  int y_index = i + j*y_stride;
  if (i < d.cols && j < d.rows )
    eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index];
}

// In-place "square relu": mat = sqrt(mat) where mat > 0, floor_val otherwise.
template<typename Real>
__global__ static void _square_relu(Real* mat, Real floor_val, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y;  // row index
  int index = i + j * d.stride;
  if (i < d.cols && j < d.rows) {
    if (mat[index] <= 0) {
      mat[index] = floor_val;
    } else {
      mat[index] = sqrt(mat[index]);
    }
  }
}

// Derivative of _square_relu: d/dx sqrt(x) = 1 / (2*sqrt(x)) for x > 0,
// 0 otherwise.
// BUGFIX: the original wrote 1.0/2*sqrt(mat[index]), which C++ parses as
// (1.0/2)*sqrt(x) = 0.5*sqrt(x) -- not the derivative of sqrt.  Parenthesize
// so the sqrt ends up in the denominator.  (Assumes 'mat' holds the
// pre-activation values, matching _square_relu's input -- confirm at callers.)
template<typename Real>
__global__ static void _diff_square_relu(Real* mat, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // col index
  int j = blockIdx.y * blockDim.y + threadIdx.y;  // row index
  int index = i + j * d.stride;
  if (i < d.cols && j < d.rows)
    mat[index] = (mat[index] > 0.0 ? 1.0 / (2.0 * sqrt(mat[index])) : 0.0);
}

// Parametric ReLU: y = a[col]*x for x > 0, b[col]*x otherwise; 'a' and 'b'
// hold one slope per column.
template<typename Real>
__global__ static void _parametric_relu(Real*y, const Real*x, MatrixDim d,
                                        int src_stride,
                                        const Real*a, const Real*b) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int dst_index = i + j*d.stride, src_index = i + j * src_stride;
  if(i < d.cols && j < d.rows) {
    Real res = (x[src_index] > 0.0) ? 
a[i] * x[src_index] : b[i] * x[src_index]; y[dst_index] = res; } } template<typename Real> __global__ static void _diff_parametric_relu(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride, const Real*a, const Real*b) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (y[y_index] > 0.0 ? a[i] * e[e_index] : b[i] * e[e_index]); } template<typename Real> __global__ static void _heaviside(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = (x[src_index] > 0.0 ? 1.0 : 0.0); y[dst_index] = res; } } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real max = aux[0]; __syncthreads(); // subtract max, apply exp, sum up... 
y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max); aux[threadIdx.x] = y[threadIdx.x+j*d.stride]; for(int i=1; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max); aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride]; } } nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real sum = aux[0]; __syncthreads(); //normalize by sum... for(int i=0; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum; } } } template<typename Real> __global__ static void _log_softmax_reduce(Real *y, const Real *x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; // Maximum step 1: loads input data to <aux>. If <d.cols> is larger than // <blockDim.x>, then we do a first pass filtering and only // keep a <blockDim.x> size array. aux[threadIdx.x] = x[threadIdx.x + j * d.stride]; for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x + i * THREADS + j * d.stride]) aux[threadIdx.x] = x[threadIdx.x + i * THREADS + j * d.stride]; } // Maximum step 2: the standard max reduce. 
int nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x + halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real max = aux[0]; __syncthreads(); // Log sum step 1: substracts max, and takes exponentials. y[threadIdx.x + j * d.stride] = x[threadIdx.x + j * d.stride] - max; aux[threadIdx.x] = exp(y[threadIdx.x + j * d.stride]); for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] = x[threadIdx.x + i * THREADS + j * d.stride] - max; aux[threadIdx.x] += exp(y[threadIdx.x + i * THREADS + j * d.stride]); } } // Log sum step 2: comptes summation and then takes logarithm. nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real log_sum = log(aux[0]); __syncthreads(); // Computes log softmax. 
for (int i = 0; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] -= log_sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i+1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; 
int32_cuda index = ((i+1) * (i+2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // if (i < dim.cols && j < dim.rows) { int dst_index = i + j * dim.stride, src_index; if (j <= i) { // no transpose src_index = (i * (i+1) / 2) + j; } else { // transpose. src_index = (j * (j+1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! 
Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real value[CU1DBLOCK]; __shared__ int32_cuda index[CU1DBLOCK]; //copy to shared memory value[threadIdx.x] = mat[i+j*d.stride]; index[threadIdx.x] = threadIdx.x; __syncthreads(); //get the id of the max value int32_cuda out_max = _max_id_reduce(value, index); __syncthreads(); //see if it's bigger value if(threadIdx.x == 0) { if(vec_val[j] <= mat[out_max+j*d.stride]) { vec_val[j] = mat[out_max+j*d.stride]; vec_id[j] = voff+out_max; } } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(i>0) return; if(j<d.rows) { int32_cuda index = vec_tgt[j] + j*d.stride; Real value = mat_net_out[index]; if(value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); } void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { 
_copy_low_upp<<<Gr,Bl>>>(A,dimA);
}

// ---- ANSI-C wrappers (float): each simply forwards its arguments to the
// ---- corresponding templated kernel with the caller-supplied launch config.

void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat,
                            MatrixDim mat_dim, const float *vec,
                            const float *mat2, int mat2_row_stride,
                            int mat2_col_stride, float beta) {
  _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2,
                               mat2_row_stride, mat2_col_stride, beta);
}
void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B,
                              MatrixDim dmat) {
  _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B,
                               MatrixDim dmat) {
  _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B,
                        MatrixDim dmat) {
  _copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B,
                         MatrixDim dmat) {
  _copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaF_transpose_matrix(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  _transpose_matrix<<<Gr,Bl>>>(mat, d);
}
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  _apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) {
  _apply_pow<<<Gr,Bl>>>(mat, power, d);
}
void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power,
                         bool include_sign, MatrixDim d) {
  _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d);
}
void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  _apply_heaviside<<<Gr,Bl>>>(mat, d);
}
void cudaF_diff_square_relu(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  _diff_square_relu<<<Gr,Bl>>>(mat, d);
}
void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src,
                     const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
                     int src_stride) {
  _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src,
                    const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
                    int src_stride) {
  _add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src,
                     const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
                     int src_stride) {
  _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst,
                            const float* const* src, MatrixDim dst_dim) {
  _copy_rows<<<Gr,Bl>>>(dst, src, dst_dim);
}
void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const* dst,
                               const float* src, MatrixDim src_dim) {
  _copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim);
}
void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst,
                    const float* src, const MatrixIndexT_cuda* reorder,
                    MatrixDim dst_dim, int src_stride) {
  _add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst,
                           const float* const* src, MatrixDim dst_dim) {
  _add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim);
}
void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha,
                              float* const* dst, const float* src,
                              MatrixDim src_dim) {
  _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val,
                       MatrixDim d) {
  _apply_floor<<<Gr,Bl>>>(mat, floor_val, d);
}
void cudaF_square_relu(dim3 Gr, dim3 Bl, float* mat, float floor_val,
                       MatrixDim d) {
  _square_relu<<<Gr,Bl>>>(mat, floor_val, d);
}
void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val,
                         MatrixDim d) {
  _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d);
}
void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) {
  _set_diag<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
  _set_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
  _add_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
  _set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { 
_set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size, float power) { _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power); } void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size) { _calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, 
float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { _add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... 
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaF_vec_min(const float* v, float* value, int dim) { _vec_min<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaF_vec_max(const float* v, float* value, int dim) { _vec_max<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaF_trace_mat_mat_trans(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat_trans<float,4> <<<4,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat<float,2> <<<2,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int N_col_stride, int threads_per_element, float beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { 
_add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { _vec_sum<<<Gr,Bl>>>(v, value, dim, inc); } void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) { _pvec_sum<<<Gr,Bl>>>(v, pvec_sum, dim, size); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int num_elements) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements); } void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, float alpha, const Int32Pair* indices, const float* x, int s, float* data) { _cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val, float *count, int dim) { _vec_apply_ceiling<<<Gr,Bl>>>(v, ceiling_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float 
*Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _group_max<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { 
_diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } //<<<<<<< HEAD void cudaF_parametric_relu (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride, const float* a, const float* b) { _parametric_relu<<<Gr,Bl>>>(y, x, d, src_stride, a, b); } void cudaF_diff_parametric_relu (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride, const float* a, const float* b) { _diff_parametric_relu<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride, a, b); } //======= void cudaF_heaviside (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _heaviside<<<Gr,Bl>>>(y, x, d, src_stride); } //>>>>>>> master void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_log_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _log_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaF_one(int Gr, int Bl, float* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { _copy_from_sp<<<Gr,Bl>>>(x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, 
MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indexes) { _add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, 
mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); } void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); } void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int mat2_row_stride, int mat2_col_stride, double beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaD_transpose_matrix(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _transpose_matrix<<<Gr,Bl>>>(mat, d); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaD_diff_square_relu(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _diff_square_relu<<<Gr,Bl>>>(mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, 
src_stride); } void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst, const double* const* src, MatrixDim dst_dim) { _copy_rows<<<Gr,Bl>>>(dst, src, dst_dim); } void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const* dst, const double* src, MatrixDim src_dim) { _copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim); } void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* const* src, MatrixDim dst_dim) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim); } void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* const* dst, const double* src, MatrixDim src_dim) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaD_square_relu(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { _square_relu<<<Gr,Bl>>>(mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, 
double* mat, double value, int dim) { _add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size, double power) { _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power); } void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const 
double* x2, MatrixDim d, int src_stride, int group_size) { _calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { _add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, 
beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaD_vec_min(const double* v, double* value, int dim) { _vec_min<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaD_vec_max(const double* v, double* value, int dim) { _vec_max<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaD_trace_mat_mat_trans(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat_trans<double,4> <<<4,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat<double,2> <<<2,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int 
N_row_stride, int N_col_stride, int threads_per_element, double beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { _vec_sum<<<Gr,Bl>>>(v,value,dim,inc); } void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) { _pvec_sum<<<Gr,Bl>>>(v,pvec_sum,dim,size); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int num_elements) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements); } void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, double alpha, const Int32Pair* indices, const double* x, int s, double* data) { _cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val, float *count, int dim) { _vec_apply_ceiling<<<Gr,Bl>>>(v,ceiling_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void 
cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _group_max<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) 
{ _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } //<<<<<<< HEAD void cudaD_parametric_relu (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, const double* a, const double* b) { _parametric_relu<<<Gr,Bl>>>(y, x, d, src_stride, a, b); } void cudaD_diff_parametric_relu (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride, const double* a, const double* b) { _diff_parametric_relu<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride, a, b); } //======= void cudaD_heaviside (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _heaviside<<<Gr,Bl>>>(y, x, d, src_stride); } //>>>>>>> master void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_log_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _log_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 
Bl, const double* x, double* y, MatrixDim d_out) { _copy_from_sp<<<Gr,Bl>>>(x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indexes) { _add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { 
_equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* Some conversion kernels for which it's more convenient to not name them F or D. */ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, 
smat_in, d_out, d_in); } void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { _trace_mat_smat<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { _trace_mat_smat_trans<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* 
trace_vec_out) { _trace_mat_smat<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { _trace_mat_smat_trans<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); }
feab8ab058c5cded12c8598cb589d98d096f2006.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hiprand/hiprand.h> #include "dropout.h" bool CheckDropout(const int n_layers, float const *layer_dropout) { bool dropout {false}; for (int l = 0; l < n_layers; ++l) { if (layer_dropout[l] < 1.0f) { dropout = true; break; } } return dropout; } void RandomlySelectDropout(const int n, float *D) { hiprandGenerator_t gen; hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MT19937); hiprandSetGeneratorOrdering(gen, CURAND_ORDERING_PSEUDO_BEST); hiprandSetPseudoRandomGeneratorSeed(gen, 104); hiprandGenerateUniform(gen, D, n); hiprandDestroyGenerator(gen); } __global__ void ApplyDropout(const int n, const float *D, float *A, const float keep_prob) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { if (D[i] > keep_prob) A[i] = 0.0f; else A[i] /= keep_prob; } }
feab8ab058c5cded12c8598cb589d98d096f2006.cu
#include <curand.h> #include "dropout.h" bool CheckDropout(const int n_layers, float const *layer_dropout) { bool dropout {false}; for (int l = 0; l < n_layers; ++l) { if (layer_dropout[l] < 1.0f) { dropout = true; break; } } return dropout; } void RandomlySelectDropout(const int n, float *D) { curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); curandSetGeneratorOrdering(gen, CURAND_ORDERING_PSEUDO_BEST); curandSetPseudoRandomGeneratorSeed(gen, 104); curandGenerateUniform(gen, D, n); curandDestroyGenerator(gen); } __global__ void ApplyDropout(const int n, const float *D, float *A, const float keep_prob) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { if (D[i] > keep_prob) A[i] = 0.0f; else A[i] /= keep_prob; } }
bf6e63bb318fb9c41c66a5afc79d7265fbd1240e.hip
// !!! This is a file automatically generated by hipify!!! // // Created by mrjak on 24-04-2020. // #include "ClusteringGpuBlocks.cuh" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "../../utils/util.h" #include "../../utils/TmpMalloc.cuh" #include "../../structures/ScyTreeArray.h" #define BLOCK_SIZE 1024 #define PI 3.14 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; __device__ float dist_gpu_blocks(int p_id, int q_id, float *X, int *subspace, int subspace_size, int d) { float *p = &X[p_id * d]; float *q = &X[q_id * d]; double distance = 0; for (int i = 0; i < subspace_size; i++) { int d_i = subspace[i]; double diff = p[d_i] - q[d_i]; distance += diff * diff; } //printf("dinstance = %f\n", distance); return sqrt(distance);//todo squared can be removed by sqrt(x)<=y => x<=y*y if x>=0, y>=0 } __global__ void compute_distances_blocks(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims, int *d_neighborhoods_full, int *d_number_of_neighbors_full, float *X, int **d_points_full, int *d_number_of_points, float neighborhood_size, int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells, int n) { for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) { for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { // printf("test-1\n"); int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest]; int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest]; int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest 
* n * n; float *d_distance_matrix = d_distance_matrix_full + i_dim * number_of_cells * n * n + i_rest * n * n; int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n; // printf("test0\n"); for (int i = blockIdx.y; i < number_of_points; i += gridDim.y) { int p_id = d_points[i]; for (int j = threadIdx.x; j < number_of_points; j += blockDim.x) { int q_id = d_points[j]; if (i < j) { float distance = dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d); d_distance_matrix[i * number_of_points + j] = distance; } } } } } } __global__ void find_neighborhood_blocks(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims, int *d_neighborhoods_full, int *d_number_of_neighbors_full, float *X, int **d_points_full, int *d_number_of_points, float neighborhood_size, int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells, int n) { for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) { for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { // printf("test-1\n"); int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest]; int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest]; int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n; float *d_distance_matrix = d_distance_matrix_full + i_dim * number_of_cells * n * n + i_rest * n * n; int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n; // printf("test0\n"); for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { int *d_neighborhood = &d_neighborhoods[i * number_of_points]; int number_of_neighbors = 0; int p_id = d_points[i]; for (int j = 0; j < number_of_points; j++) { int q_id = d_points[j]; if (p_id != q_id) { float 
distance = 0;//dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d); if (i < j) { distance = d_distance_matrix[i * number_of_points + j]; } else if (j < i) { distance = d_distance_matrix[j * number_of_points + i]; } if (neighborhood_size >= distance) { d_neighborhood[number_of_neighbors] = j; number_of_neighbors++; } } } d_number_of_neighbors[i] = number_of_neighbors; } } } } // //__global__ //void //find_neighborhood_blocks(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims, int *d_neighborhoods_full, // int *d_number_of_neighbors_full, // float *X, // int **d_points_full, int *d_number_of_points, float neighborhood_size, // int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells, // int n) { // for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) { // for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { // //// printf("test-1\n"); // int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; // int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; // int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest]; // int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest]; // int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n; // int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n; //// printf("test0\n"); // // // for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { // int *d_neighborhood = &d_neighborhoods[i * number_of_points]; // int number_of_neighbors = 0; // int p_id = d_points[i]; // for (int j = 0; j < number_of_points; j++) { // int q_id = d_points[j]; // if (p_id != q_id) { // float distance = dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d); // if (neighborhood_size >= distance) { // d_neighborhood[number_of_neighbors] = j; // number_of_neighbors++; // } // } // } // 
d_number_of_neighbors[i] = number_of_neighbors; // } // } // } //} __device__ float phi_gpu_blocks(int p_id, int *d_neighborhood, float neighborhood_size, int number_of_neighbors, float *X, int *d_points, int *subspace, int subspace_size, int d) { float sum = 0; for (int j = 0; j < number_of_neighbors; j++) { int q_id = d_points[d_neighborhood[j]]; if (q_id >= 0) { float distance = dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d) / neighborhood_size; float sq = distance * distance; sum += (1. - sq); } } return sum; } __device__ float gamma_gpu_blocks(double n) { if (round(n) == 1) {//todo not nice cond n==1 return 1.; } else if (n < 1) {//todo not nice cond n==1/2 return sqrt(PI); } return (n - 1.) * gamma_gpu_blocks(n - 1.); } __device__ double gamma_gpu_blocks(int n) { if (n == 2) { return 1.; } else if (n == 1) { return sqrt(PI); } return (n / 2. - 1.) * gamma_gpu_blocks(n - 2); } __device__ float c_gpu_blocks(int subspace_size) { float r = pow(PI, subspace_size / 2.); //r = r / gamma_gpu_blocks(subspace_size / 2. + 1.); r = r / gamma_gpu_blocks(subspace_size + 2); return r; } __device__ float alpha_gpu_blocks(int subspace_size, float neighborhood_size, int n) { float v = 1.;//todo v is missing?? what is it?? 
float r = 2 * n * pow(neighborhood_size, subspace_size) * c_gpu_blocks(subspace_size); r = r / (pow(v, subspace_size) * (subspace_size + 2)); return r; } __device__ float omega_gpu_blocks(int subspace_size) { return 2.0 / (subspace_size + 2.0); } __global__ void compute_is_dense_blocks(int *d_restricteds_pr_dim, bool *d_is_dense_full, int **d_points_full, int *d_number_of_points, int *d_neighborhoods_full, float neighborhood_size, int *d_number_of_neighbors_full, float *X, int **d_restricted_dims_full, int *d_number_of_restricted_dims, float F, int n, int num_obj, int d, int number_of_cells) {//todo change name of subspace int i_dim = blockIdx.x; for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest]; int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest]; bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n; int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n; int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n; for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { int *d_neighborhood = &d_neighborhoods[i * number_of_points]; int p_id = d_points[i]; float p = phi_gpu_blocks(p_id, d_neighborhood, neighborhood_size, d_number_of_neighbors[i], X, d_points, subspace, subspace_size, d); float a = alpha_gpu_blocks(subspace_size, neighborhood_size, n); float w = omega_gpu_blocks(subspace_size); // printf("%d, %f>=%f\n",p_id, p, max(F * a, num_obj * w)); // printf("F=%f, a=%f, num_obj=%d, w=%f\n", F, a, num_obj, w); d_is_dense[i] = p >= max(F * a, num_obj * w); } } } __global__ void compute_is_dense_new_blocks(int *d_restricteds_pr_dim, bool *d_is_dense_full, int **d_points_full, int 
*d_number_of_points, float neighborhood_size, float *X, int **d_restricted_dims_full, int *d_number_of_restricted_dims, float F, int n, int num_obj, int d, int number_of_cells) {//todo change name of subspace int i_dim = blockIdx.x; for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest]; int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest]; bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n; for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { int p_id = d_points[i]; // float p = phi_gpu_blocks(p_id, d_neighborhood, neighborhood_size, d_number_of_neighbors[i], X, d_points, // subspace, subspace_size, d); float p = 0; for (int j = 0; j < n; j++) { int q_id = j; if (p_id != q_id) { float distance = dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d); if (neighborhood_size >= distance) { distance = distance / neighborhood_size; float sq = distance * distance; p += (1. 
- sq); } } } float a = alpha_gpu_blocks(subspace_size, neighborhood_size, n); float w = omega_gpu_blocks(subspace_size); // printf("%d:%d, %f>=%f\n", p_id, subspace_size, p, max(F * a, num_obj * w)); // printf("%d:%d, F=%f, a=%f, num_obj=%d, w=%f\n", p_id, subspace_size, F, a, num_obj, w); d_is_dense[i] = p >= max(F * a, num_obj * w); } } } //for ref see: http://hpcg.purdue.edu/papers/Stava2011CCL.pdf __global__ void disjoint_set_clustering_blocks(int *d_restricteds_pr_dim, int *d_clustering_full, int *d_disjoint_set_full, int *d_neighborhoods_full, int *d_number_of_neighbors_full, bool *d_is_dense_full, int **d_points_full, int *d_number_of_points, int number_of_cells, int n) { int i_dim = blockIdx.x; for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; int *d_clustering = d_clustering_full + i_dim * n; bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n; int *d_disjoint_set = d_disjoint_set_full + i_dim * number_of_cells * n + i_rest * n; int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n; int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n; __shared__ int changed; changed = 1; __syncthreads(); //init for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { if (d_is_dense[i]) { d_disjoint_set[i] = i; } else { d_disjoint_set[i] = -1; } } __syncthreads(); //for (int itr = 1; itr < number_of_points; itr *= 2) { while (changed) { //disjoint_set_pass1 __syncthreads(); changed = 0; __syncthreads(); for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { if (!d_is_dense[i]) continue; int root = d_disjoint_set[i]; int *d_neighborhood = &d_neighborhoods[i * number_of_points]; for (int j = 0; j < d_number_of_neighbors[i]; j++) { if (d_is_dense[d_neighborhood[j]]) { if 
(d_disjoint_set[d_neighborhood[j]] < root) { root = d_disjoint_set[d_neighborhood[j]]; atomicMax(&changed, 1); } } } d_disjoint_set[i] = root; } __syncthreads(); //disjoint_set_pass2 for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { int root = d_disjoint_set[i]; while (root >= 0 && root != d_disjoint_set[root]) { root = d_disjoint_set[root]; } d_disjoint_set[i] = root; } __syncthreads(); } //gather_clustering for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { if (d_is_dense[i]) { d_clustering[d_points[i]] = d_points[d_disjoint_set[i]]; }else { d_clustering[d_points[i]] = -1; } } } } void ClusteringGPUBlocks(TmpMalloc *tmps, int *d_clustering_full, vector <vector<ScyTreeArray *>> L_pruned, float *d_X, int n, int d, float neighborhood_size, float F, int num_obj, int number_of_cells) { tmps->reset_counters(); int restricted_dims = L_pruned.size(); int *h_restricteds_pr_dim = new int[restricted_dims]; int **h_points = new int *[restricted_dims * number_of_cells]; int **h_restricted_dims = new int *[restricted_dims * number_of_cells]; int *h_number_of_points = new int[restricted_dims * number_of_cells]; int *h_number_of_restricted_dims = new int[restricted_dims * number_of_cells]; for (int i = 0; i < restricted_dims; i++) { h_restricteds_pr_dim[i] = L_pruned[i].size(); for (int j = 0; j < L_pruned[i].size(); j++) { h_points[i * number_of_cells + j] = L_pruned[i][j]->d_points; h_restricted_dims[i * number_of_cells + j] = L_pruned[i][j]->d_restricted_dims; h_number_of_points[i * number_of_cells + j] = L_pruned[i][j]->number_of_points; h_number_of_restricted_dims[i * number_of_cells + j] = L_pruned[i][j]->number_of_restricted_dims; } } int *d_restricteds_pr_dim = tmps->get_int_array(tmps->int_array_counter++, restricted_dims); // hipMalloc(&d_restricteds_pr_dim, restricted_dims * sizeof(int)); hipMemcpy(d_restricteds_pr_dim, h_restricteds_pr_dim, restricted_dims * sizeof(int), hipMemcpyHostToDevice); gpuErrchk(hipPeekAtLastError()); //todo 
copy int **d_points = tmps->get_int_pointer_array(tmps->int_pointer_array_counter++, restricted_dims * number_of_cells); int **d_restricted_dims = tmps->get_int_pointer_array(tmps->int_pointer_array_counter++, restricted_dims * number_of_cells); int *d_number_of_points = tmps->get_int_array(tmps->int_array_counter++, restricted_dims * number_of_cells); int *d_number_of_restricted_dims = tmps->get_int_array(tmps->int_array_counter++, restricted_dims * number_of_cells); // hipMalloc(&d_points, restricted_dims * number_of_cells * sizeof(int *)); // hipMalloc(&d_restricted_dims, restricted_dims * number_of_cells * sizeof(int *)); // hipMalloc(&d_number_of_points, restricted_dims * number_of_cells * sizeof(int)); // hipMalloc(&d_number_of_restricted_dims, restricted_dims * number_of_cells * sizeof(int)); gpuErrchk(hipPeekAtLastError()); hipMemcpy(d_points, h_points, restricted_dims * number_of_cells * sizeof(int *), hipMemcpyHostToDevice); gpuErrchk(hipPeekAtLastError()); hipMemcpy(d_restricted_dims, h_restricted_dims, restricted_dims * number_of_cells * sizeof(int *), hipMemcpyHostToDevice); gpuErrchk(hipPeekAtLastError()); hipMemcpy(d_number_of_points, h_number_of_points, restricted_dims * number_of_cells * sizeof(int), hipMemcpyHostToDevice); gpuErrchk(hipPeekAtLastError()); hipMemcpy(d_number_of_restricted_dims, h_number_of_restricted_dims, restricted_dims * number_of_cells * sizeof(int), hipMemcpyHostToDevice); gpuErrchk(hipPeekAtLastError()); int *d_neighborhoods_full = tmps->get_int_array(tmps->int_array_counter++, n * n * restricted_dims * number_of_cells); float *d_distance_matrix_full = tmps->get_float_array(tmps->float_array_counter++, n * n * restricted_dims * number_of_cells); int *d_number_of_neighbors_full = tmps->get_int_array(tmps->int_array_counter++, n * restricted_dims * number_of_cells); hipMemset(d_number_of_neighbors_full, 0, restricted_dims * number_of_cells * n * sizeof(int)); bool *d_is_dense_full = 
tmps->get_bool_array(tmps->bool_array_counter++, n * restricted_dims * number_of_cells); int *d_disjoint_set_full = tmps->get_int_array(tmps->int_array_counter++, n * restricted_dims * number_of_cells); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); int number_of_threads = min(n, BLOCK_SIZE); if (restricted_dims > 0) { dim3 block(number_of_threads / 2); dim3 grid(restricted_dims, number_of_threads / 2); compute_distances_blocks << < grid, block >> > (d_distance_matrix_full, d_restricteds_pr_dim, restricted_dims, d_neighborhoods_full, d_number_of_neighbors_full, d_X, d_points, d_number_of_points, neighborhood_size, d_restricted_dims, d_number_of_restricted_dims, d, number_of_cells, n); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); find_neighborhood_blocks << < restricted_dims, number_of_threads >> > (d_distance_matrix_full, d_restricteds_pr_dim, restricted_dims, d_neighborhoods_full, d_number_of_neighbors_full, d_X, d_points, d_number_of_points, neighborhood_size, d_restricted_dims, d_number_of_restricted_dims, d, number_of_cells, n); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); compute_is_dense_blocks << < restricted_dims, number_of_threads >> > (d_restricteds_pr_dim, d_is_dense_full, d_points, d_number_of_points, d_neighborhoods_full, neighborhood_size, d_number_of_neighbors_full, d_X, d_restricted_dims, d_number_of_restricted_dims, F, n, num_obj, d, number_of_cells); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); disjoint_set_clustering_blocks << < restricted_dims, number_of_threads >> > (d_restricteds_pr_dim, d_clustering_full, d_disjoint_set_full, d_neighborhoods_full, d_number_of_neighbors_full, d_is_dense_full, d_points, d_number_of_points, number_of_cells, n); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); } }
bf6e63bb318fb9c41c66a5afc79d7265fbd1240e.cu
// // Created by mrjak on 24-04-2020. // #include "ClusteringGpuBlocks.cuh" #include <cuda.h> #include <cuda_runtime.h> #include "../../utils/util.h" #include "../../utils/TmpMalloc.cuh" #include "../../structures/ScyTreeArray.h" #define BLOCK_SIZE 1024 #define PI 3.14 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; __device__ float dist_gpu_blocks(int p_id, int q_id, float *X, int *subspace, int subspace_size, int d) { float *p = &X[p_id * d]; float *q = &X[q_id * d]; double distance = 0; for (int i = 0; i < subspace_size; i++) { int d_i = subspace[i]; double diff = p[d_i] - q[d_i]; distance += diff * diff; } //printf("dinstance = %f\n", distance); return sqrt(distance);//todo squared can be removed by sqrt(x)<=y => x<=y*y if x>=0, y>=0 } __global__ void compute_distances_blocks(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims, int *d_neighborhoods_full, int *d_number_of_neighbors_full, float *X, int **d_points_full, int *d_number_of_points, float neighborhood_size, int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells, int n) { for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) { for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { // printf("test-1\n"); int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest]; int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest]; int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n; float *d_distance_matrix = d_distance_matrix_full + i_dim * 
number_of_cells * n * n + i_rest * n * n; int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n; // printf("test0\n"); for (int i = blockIdx.y; i < number_of_points; i += gridDim.y) { int p_id = d_points[i]; for (int j = threadIdx.x; j < number_of_points; j += blockDim.x) { int q_id = d_points[j]; if (i < j) { float distance = dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d); d_distance_matrix[i * number_of_points + j] = distance; } } } } } } __global__ void find_neighborhood_blocks(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims, int *d_neighborhoods_full, int *d_number_of_neighbors_full, float *X, int **d_points_full, int *d_number_of_points, float neighborhood_size, int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells, int n) { for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) { for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { // printf("test-1\n"); int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest]; int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest]; int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n; float *d_distance_matrix = d_distance_matrix_full + i_dim * number_of_cells * n * n + i_rest * n * n; int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n; // printf("test0\n"); for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { int *d_neighborhood = &d_neighborhoods[i * number_of_points]; int number_of_neighbors = 0; int p_id = d_points[i]; for (int j = 0; j < number_of_points; j++) { int q_id = d_points[j]; if (p_id != q_id) { float distance = 0;//dist_gpu_blocks(p_id, q_id, X, subspace, 
subspace_size, d); if (i < j) { distance = d_distance_matrix[i * number_of_points + j]; } else if (j < i) { distance = d_distance_matrix[j * number_of_points + i]; } if (neighborhood_size >= distance) { d_neighborhood[number_of_neighbors] = j; number_of_neighbors++; } } } d_number_of_neighbors[i] = number_of_neighbors; } } } } // //__global__ //void //find_neighborhood_blocks(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims, int *d_neighborhoods_full, // int *d_number_of_neighbors_full, // float *X, // int **d_points_full, int *d_number_of_points, float neighborhood_size, // int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells, // int n) { // for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) { // for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { // //// printf("test-1\n"); // int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; // int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; // int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest]; // int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest]; // int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n; // int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n; //// printf("test0\n"); // // // for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { // int *d_neighborhood = &d_neighborhoods[i * number_of_points]; // int number_of_neighbors = 0; // int p_id = d_points[i]; // for (int j = 0; j < number_of_points; j++) { // int q_id = d_points[j]; // if (p_id != q_id) { // float distance = dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d); // if (neighborhood_size >= distance) { // d_neighborhood[number_of_neighbors] = j; // number_of_neighbors++; // } // } // } // d_number_of_neighbors[i] = number_of_neighbors; // } // } // 
} //} __device__ float phi_gpu_blocks(int p_id, int *d_neighborhood, float neighborhood_size, int number_of_neighbors, float *X, int *d_points, int *subspace, int subspace_size, int d) { float sum = 0; for (int j = 0; j < number_of_neighbors; j++) { int q_id = d_points[d_neighborhood[j]]; if (q_id >= 0) { float distance = dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d) / neighborhood_size; float sq = distance * distance; sum += (1. - sq); } } return sum; } __device__ float gamma_gpu_blocks(double n) { if (round(n) == 1) {//todo not nice cond n==1 return 1.; } else if (n < 1) {//todo not nice cond n==1/2 return sqrt(PI); } return (n - 1.) * gamma_gpu_blocks(n - 1.); } __device__ double gamma_gpu_blocks(int n) { if (n == 2) { return 1.; } else if (n == 1) { return sqrt(PI); } return (n / 2. - 1.) * gamma_gpu_blocks(n - 2); } __device__ float c_gpu_blocks(int subspace_size) { float r = pow(PI, subspace_size / 2.); //r = r / gamma_gpu_blocks(subspace_size / 2. + 1.); r = r / gamma_gpu_blocks(subspace_size + 2); return r; } __device__ float alpha_gpu_blocks(int subspace_size, float neighborhood_size, int n) { float v = 1.;//todo v is missing?? what is it?? 
float r = 2 * n * pow(neighborhood_size, subspace_size) * c_gpu_blocks(subspace_size); r = r / (pow(v, subspace_size) * (subspace_size + 2)); return r; } __device__ float omega_gpu_blocks(int subspace_size) { return 2.0 / (subspace_size + 2.0); } __global__ void compute_is_dense_blocks(int *d_restricteds_pr_dim, bool *d_is_dense_full, int **d_points_full, int *d_number_of_points, int *d_neighborhoods_full, float neighborhood_size, int *d_number_of_neighbors_full, float *X, int **d_restricted_dims_full, int *d_number_of_restricted_dims, float F, int n, int num_obj, int d, int number_of_cells) {//todo change name of subspace int i_dim = blockIdx.x; for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest]; int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest]; bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n; int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n; int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n; for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { int *d_neighborhood = &d_neighborhoods[i * number_of_points]; int p_id = d_points[i]; float p = phi_gpu_blocks(p_id, d_neighborhood, neighborhood_size, d_number_of_neighbors[i], X, d_points, subspace, subspace_size, d); float a = alpha_gpu_blocks(subspace_size, neighborhood_size, n); float w = omega_gpu_blocks(subspace_size); // printf("%d, %f>=%f\n",p_id, p, max(F * a, num_obj * w)); // printf("F=%f, a=%f, num_obj=%d, w=%f\n", F, a, num_obj, w); d_is_dense[i] = p >= max(F * a, num_obj * w); } } } __global__ void compute_is_dense_new_blocks(int *d_restricteds_pr_dim, bool *d_is_dense_full, int **d_points_full, int 
*d_number_of_points, float neighborhood_size, float *X, int **d_restricted_dims_full, int *d_number_of_restricted_dims, float F, int n, int num_obj, int d, int number_of_cells) {//todo change name of subspace int i_dim = blockIdx.x; for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest]; int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest]; bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n; for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { int p_id = d_points[i]; // float p = phi_gpu_blocks(p_id, d_neighborhood, neighborhood_size, d_number_of_neighbors[i], X, d_points, // subspace, subspace_size, d); float p = 0; for (int j = 0; j < n; j++) { int q_id = j; if (p_id != q_id) { float distance = dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d); if (neighborhood_size >= distance) { distance = distance / neighborhood_size; float sq = distance * distance; p += (1. 
- sq); } } } float a = alpha_gpu_blocks(subspace_size, neighborhood_size, n); float w = omega_gpu_blocks(subspace_size); // printf("%d:%d, %f>=%f\n", p_id, subspace_size, p, max(F * a, num_obj * w)); // printf("%d:%d, F=%f, a=%f, num_obj=%d, w=%f\n", p_id, subspace_size, F, a, num_obj, w); d_is_dense[i] = p >= max(F * a, num_obj * w); } } } //for ref see: http://hpcg.purdue.edu/papers/Stava2011CCL.pdf __global__ void disjoint_set_clustering_blocks(int *d_restricteds_pr_dim, int *d_clustering_full, int *d_disjoint_set_full, int *d_neighborhoods_full, int *d_number_of_neighbors_full, bool *d_is_dense_full, int **d_points_full, int *d_number_of_points, int number_of_cells, int n) { int i_dim = blockIdx.x; for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) { int *d_points = d_points_full[i_dim * number_of_cells + i_rest]; int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest]; int *d_clustering = d_clustering_full + i_dim * n; bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n; int *d_disjoint_set = d_disjoint_set_full + i_dim * number_of_cells * n + i_rest * n; int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n; int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n; __shared__ int changed; changed = 1; __syncthreads(); //init for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { if (d_is_dense[i]) { d_disjoint_set[i] = i; } else { d_disjoint_set[i] = -1; } } __syncthreads(); //for (int itr = 1; itr < number_of_points; itr *= 2) { while (changed) { //disjoint_set_pass1 __syncthreads(); changed = 0; __syncthreads(); for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { if (!d_is_dense[i]) continue; int root = d_disjoint_set[i]; int *d_neighborhood = &d_neighborhoods[i * number_of_points]; for (int j = 0; j < d_number_of_neighbors[i]; j++) { if (d_is_dense[d_neighborhood[j]]) { if 
(d_disjoint_set[d_neighborhood[j]] < root) { root = d_disjoint_set[d_neighborhood[j]]; atomicMax(&changed, 1); } } } d_disjoint_set[i] = root; } __syncthreads(); //disjoint_set_pass2 for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { int root = d_disjoint_set[i]; while (root >= 0 && root != d_disjoint_set[root]) { root = d_disjoint_set[root]; } d_disjoint_set[i] = root; } __syncthreads(); } //gather_clustering for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) { if (d_is_dense[i]) { d_clustering[d_points[i]] = d_points[d_disjoint_set[i]]; }else { d_clustering[d_points[i]] = -1; } } } } void ClusteringGPUBlocks(TmpMalloc *tmps, int *d_clustering_full, vector <vector<ScyTreeArray *>> L_pruned, float *d_X, int n, int d, float neighborhood_size, float F, int num_obj, int number_of_cells) { tmps->reset_counters(); int restricted_dims = L_pruned.size(); int *h_restricteds_pr_dim = new int[restricted_dims]; int **h_points = new int *[restricted_dims * number_of_cells]; int **h_restricted_dims = new int *[restricted_dims * number_of_cells]; int *h_number_of_points = new int[restricted_dims * number_of_cells]; int *h_number_of_restricted_dims = new int[restricted_dims * number_of_cells]; for (int i = 0; i < restricted_dims; i++) { h_restricteds_pr_dim[i] = L_pruned[i].size(); for (int j = 0; j < L_pruned[i].size(); j++) { h_points[i * number_of_cells + j] = L_pruned[i][j]->d_points; h_restricted_dims[i * number_of_cells + j] = L_pruned[i][j]->d_restricted_dims; h_number_of_points[i * number_of_cells + j] = L_pruned[i][j]->number_of_points; h_number_of_restricted_dims[i * number_of_cells + j] = L_pruned[i][j]->number_of_restricted_dims; } } int *d_restricteds_pr_dim = tmps->get_int_array(tmps->int_array_counter++, restricted_dims); // cudaMalloc(&d_restricteds_pr_dim, restricted_dims * sizeof(int)); cudaMemcpy(d_restricteds_pr_dim, h_restricteds_pr_dim, restricted_dims * sizeof(int), cudaMemcpyHostToDevice); gpuErrchk(cudaPeekAtLastError()); 
//todo copy int **d_points = tmps->get_int_pointer_array(tmps->int_pointer_array_counter++, restricted_dims * number_of_cells); int **d_restricted_dims = tmps->get_int_pointer_array(tmps->int_pointer_array_counter++, restricted_dims * number_of_cells); int *d_number_of_points = tmps->get_int_array(tmps->int_array_counter++, restricted_dims * number_of_cells); int *d_number_of_restricted_dims = tmps->get_int_array(tmps->int_array_counter++, restricted_dims * number_of_cells); // cudaMalloc(&d_points, restricted_dims * number_of_cells * sizeof(int *)); // cudaMalloc(&d_restricted_dims, restricted_dims * number_of_cells * sizeof(int *)); // cudaMalloc(&d_number_of_points, restricted_dims * number_of_cells * sizeof(int)); // cudaMalloc(&d_number_of_restricted_dims, restricted_dims * number_of_cells * sizeof(int)); gpuErrchk(cudaPeekAtLastError()); cudaMemcpy(d_points, h_points, restricted_dims * number_of_cells * sizeof(int *), cudaMemcpyHostToDevice); gpuErrchk(cudaPeekAtLastError()); cudaMemcpy(d_restricted_dims, h_restricted_dims, restricted_dims * number_of_cells * sizeof(int *), cudaMemcpyHostToDevice); gpuErrchk(cudaPeekAtLastError()); cudaMemcpy(d_number_of_points, h_number_of_points, restricted_dims * number_of_cells * sizeof(int), cudaMemcpyHostToDevice); gpuErrchk(cudaPeekAtLastError()); cudaMemcpy(d_number_of_restricted_dims, h_number_of_restricted_dims, restricted_dims * number_of_cells * sizeof(int), cudaMemcpyHostToDevice); gpuErrchk(cudaPeekAtLastError()); int *d_neighborhoods_full = tmps->get_int_array(tmps->int_array_counter++, n * n * restricted_dims * number_of_cells); float *d_distance_matrix_full = tmps->get_float_array(tmps->float_array_counter++, n * n * restricted_dims * number_of_cells); int *d_number_of_neighbors_full = tmps->get_int_array(tmps->int_array_counter++, n * restricted_dims * number_of_cells); cudaMemset(d_number_of_neighbors_full, 0, restricted_dims * number_of_cells * n * sizeof(int)); bool *d_is_dense_full = 
tmps->get_bool_array(tmps->bool_array_counter++, n * restricted_dims * number_of_cells); int *d_disjoint_set_full = tmps->get_int_array(tmps->int_array_counter++, n * restricted_dims * number_of_cells); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); int number_of_threads = min(n, BLOCK_SIZE); if (restricted_dims > 0) { dim3 block(number_of_threads / 2); dim3 grid(restricted_dims, number_of_threads / 2); compute_distances_blocks << < grid, block >> > (d_distance_matrix_full, d_restricteds_pr_dim, restricted_dims, d_neighborhoods_full, d_number_of_neighbors_full, d_X, d_points, d_number_of_points, neighborhood_size, d_restricted_dims, d_number_of_restricted_dims, d, number_of_cells, n); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); find_neighborhood_blocks << < restricted_dims, number_of_threads >> > (d_distance_matrix_full, d_restricteds_pr_dim, restricted_dims, d_neighborhoods_full, d_number_of_neighbors_full, d_X, d_points, d_number_of_points, neighborhood_size, d_restricted_dims, d_number_of_restricted_dims, d, number_of_cells, n); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); compute_is_dense_blocks << < restricted_dims, number_of_threads >> > (d_restricteds_pr_dim, d_is_dense_full, d_points, d_number_of_points, d_neighborhoods_full, neighborhood_size, d_number_of_neighbors_full, d_X, d_restricted_dims, d_number_of_restricted_dims, F, n, num_obj, d, number_of_cells); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); disjoint_set_clustering_blocks << < restricted_dims, number_of_threads >> > (d_restricteds_pr_dim, d_clustering_full, d_disjoint_set_full, d_neighborhoods_full, d_number_of_neighbors_full, d_is_dense_full, d_points, d_number_of_points, number_of_cells, n); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); } }
c23b07bd2c054a6cf1d92b72c14f88786be29cf3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "OPT_2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *d_adjList = NULL; hipMalloc(&d_adjList, XSIZE*YSIZE); int *d_sizeAdj = NULL; hipMalloc(&d_sizeAdj, XSIZE*YSIZE); int *d_lcmMatrix = NULL; hipMalloc(&d_lcmMatrix, XSIZE*YSIZE); int *d_LCMSize = NULL; hipMalloc(&d_LCMSize, XSIZE*YSIZE); int n_vertices = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( OPT_2), dim3(gridBlock),dim3(threadBlock), 0, 0, d_adjList,d_sizeAdj,d_lcmMatrix,d_LCMSize,n_vertices); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( OPT_2), dim3(gridBlock),dim3(threadBlock), 0, 0, d_adjList,d_sizeAdj,d_lcmMatrix,d_LCMSize,n_vertices); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( OPT_2), dim3(gridBlock),dim3(threadBlock), 0, 0, 
d_adjList,d_sizeAdj,d_lcmMatrix,d_LCMSize,n_vertices); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c23b07bd2c054a6cf1d92b72c14f88786be29cf3.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "OPT_2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *d_adjList = NULL; cudaMalloc(&d_adjList, XSIZE*YSIZE); int *d_sizeAdj = NULL; cudaMalloc(&d_sizeAdj, XSIZE*YSIZE); int *d_lcmMatrix = NULL; cudaMalloc(&d_lcmMatrix, XSIZE*YSIZE); int *d_LCMSize = NULL; cudaMalloc(&d_LCMSize, XSIZE*YSIZE); int n_vertices = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); OPT_2<<<gridBlock,threadBlock>>>(d_adjList,d_sizeAdj,d_lcmMatrix,d_LCMSize,n_vertices); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { OPT_2<<<gridBlock,threadBlock>>>(d_adjList,d_sizeAdj,d_lcmMatrix,d_LCMSize,n_vertices); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { OPT_2<<<gridBlock,threadBlock>>>(d_adjList,d_sizeAdj,d_lcmMatrix,d_LCMSize,n_vertices); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
46da2204f9174e601dbc8c206d2c51bd0bff9418.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #include "real.h" #include "dataDef.h" #include "parallelSpmv.h" #define MAXTHREADS 128 #define REP 1000 #ifdef DOUBLE texture<int2> xTex; texture<int2> valTex; #else texture<float> xTex; texture<float> valTex; #endif void meanAndSd(real *mean, real *sd,real *data, int n) { real sum = (real) 0.0; real standardDeviation = (real) 0.0; for(int i=0; i<n; ++i) { sum += data[i]; } // end for // *mean = sum/n; for(int i=0; i<n; ++i) { standardDeviation += pow(data[i] - *mean, 2); } // end for // *sd=sqrt(standardDeviation/n); } // end of calculateSD // int main(int argc, char *argv[]) { #include "parallelSpmvData.h" hipError_t cuda_ret; cuda_ret = hipGetDeviceCount(&ngpus); if(cuda_ret != hipSuccess) FATAL("Unable to deternine number of GPUs"); //ngpus=4; // verifing number of input parameters // char exists='t'; char checkSol='f'; if (argc < 3 ) { printf("Use: %s Matrix_filename InputVector_filename [SolutionVector_filename] \n", argv[0]); exists='f'; } // endif // FILE *fh=NULL; // testing if matrix file exists if((fh = fopen(argv[1], "rb") ) == NULL) { printf("No matrix file found.\n"); exists='f'; } // end if // // testing if input file exists if((fh = fopen(argv[2], "rb") ) == NULL) { printf("No input vector file found.\n"); exists='f'; } // end if // // testing if output file exists if (argc >3 ) { if((fh = fopen(argv[3], "rb") ) == NULL) { printf("No output vector file found.\n"); exists='f'; } else { checkSol='t'; } // end if // } // end if // if (fh) fclose(fh); if (exists == 'f') { printf("Quitting.....\n"); exit(0); } // end if // printf("%s Precision. Solving using %d %s\n", (sizeof(real) == sizeof(double)) ? "Double": "Single", ngpus, (ngpus > 1) ? 
"GPUs": "GPU" ); #include "parallelSpmvAllocateData.h" reader(&n, on_proc_nnz, off_proc_nnz, &row_ptr,&col_idx,&val, &row_ptr_off,&col_idx_off,&val_off, argv[1], ngpus); if (ngpus>1) { createCommunicator(nColsOff, sendColumns, recvSendCount , col_idx_off, off_proc_nnz, n,&ngpus); } // end if // // ready to start // int totalNNZ=0; for (int gpu=0; gpu<ngpus; ++gpu) { totalNNZ+=on_proc_nnz[gpu]; totalNNZ+=off_proc_nnz[gpu]; cuda_ret = hipSetDevice(gpu); if(cuda_ret != hipSuccess) FATAL("Unable to set gpu"); //cuda_ret = hipStreamCreateWithFlags(&stream0[gpu], hipStreamDefault); cuda_ret = hipStreamCreateWithFlags(&stream0[gpu], hipStreamNonBlocking ) ; if(cuda_ret != hipSuccess) FATAL("Unable to create stream0"); cuda_ret = hipStreamCreateWithFlags(&stream1[gpu], hipStreamNonBlocking ) ; if(cuda_ret != hipSuccess) FATAL("Unable to create stream1"); v[gpu] = (real *) malloc((n[gpu])*sizeof(real)); w[gpu] = (real *) malloc((n[gpu])*sizeof(real)); vectorReader(v[gpu], &gpu, n, argv[2]); if (ngpus > 1) hipHostMalloc((real **)&v_off[gpu] , nColsOff[gpu]*sizeof(real),hipHostMallocDefault); ///////////////////////////////////////////////////// // determining the standard deviation of the nnz per row real *temp=(real *) malloc((n[gpu])*sizeof(real)); for (int row=0; row<n[gpu]; ++row) { temp[row] = row_ptr[gpu][row+1] - row_ptr[gpu][row]; } // end for // meanAndSd(&meanNnzPerRow0[gpu],&sd0[gpu],temp,n[gpu]); //printf("file: %s, line: %d, gpu on-prcoc: %d, mean: %7.3f, sd: %7.3f using: %s \n", __FILE__, __LINE__, gpu , meanNnzPerRow0[gpu], sd0[gpu], (meanNnzPerRow0[gpu] + 0.5*sd0[gpu] < 32) ? 
"spmv0": "spmv1"); if (nColsOff[gpu]) { for (int row=0; row<n[gpu]; ++row) { temp[row] = row_ptr_off[gpu][row+1] - row_ptr_off[gpu][row]; } // end for // meanAndSd(&meanNnzPerRow1[gpu],&sd1[gpu],temp,n[gpu]); //printf("file: %s, line: %d, gpu off-prcoc: %d, mean: %7.3f, sd: %7.3f using: %s \n", __FILE__, __LINE__, gpu , meanNnzPerRow1[gpu], sd1[gpu], (meanNnzPerRow1[gpu] + 0.5*sd1[gpu] < 32) ? "spmv0": "spmv1"); } // end if // free(temp); ///////////////////////////////////////////////////// hipSetDevice(gpu); //printf("file: %s, line: %d, setting gpu: %d\n", __FILE__, __LINE__,gpu); // Allocating device memory for on_proc input matrices cuda_ret = hipMalloc((void **) &row_ptr_d[gpu], (n[gpu]+1) * sizeof(int) ); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for rows_d"); cuda_ret = hipMalloc((void **) &col_idx_d[gpu], on_proc_nnz[gpu] * sizeof(int)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for cols_d"); cuda_ret = hipMalloc((void **) &val_d[gpu], on_proc_nnz[gpu] * sizeof(real)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for vals_d"); // Copy the input on_proc matrices from the host memory to the device memory cuda_ret = hipMemcpy(row_ptr_d[gpu], row_ptr[gpu], (n[gpu]+1)*sizeof(int),hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix row_ptr_d"); cuda_ret = hipMemcpy(col_idx_d[gpu], col_idx[gpu], on_proc_nnz[gpu]*sizeof(int),hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix col_idx_d"); cuda_ret = hipMemcpy(val_d[gpu], val[gpu], on_proc_nnz[gpu]*sizeof(real),hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix val_d"); // Allocating device memory for inpit and output vectors cuda_ret = hipMalloc((void **) &(w_d[gpu]), n[gpu]*sizeof(real)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for w_d"); cuda_ret = hipMalloc((void **) &(v_d[gpu]), 
n[gpu]*sizeof(real)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for v_d"); // Copy the input vector to device cuda_ret = hipMemcpy(v_d[gpu], v[gpu], n[gpu]*sizeof(real),hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix v_d"); if (ngpus > 1) { // Allocating device memory for off_proc input matrices cuda_ret = hipMalloc((void **) &row_ptr_off_d[gpu], (n[gpu]+1)*sizeof(int)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for rows_d"); cuda_ret = hipMalloc((void **) &col_idx_off_d[gpu], off_proc_nnz[gpu] * sizeof(int)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for cols_d"); cuda_ret = hipMalloc((void **) &val_off_d[gpu], off_proc_nnz[gpu] *sizeof(real)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for vals_d"); // Copy the input off_proc matrices from the host memory to the device memory cuda_ret = hipMemcpy(col_idx_off_d[gpu], col_idx_off[gpu], off_proc_nnz[gpu]*sizeof(int),hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix col_idx_d"); cuda_ret = hipMemcpy(val_off_d[gpu] , val_off[gpu], off_proc_nnz[gpu]*sizeof(real),hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix val_d"); cuda_ret = hipMemcpy(row_ptr_off_d[gpu], row_ptr_off[gpu], (n[gpu]+1)*sizeof(int),hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix row_ptr_d"); // Allocating device memory for inpit and output vectors cuda_ret = hipMalloc((void **) &v_off_d[gpu], nColsOff[gpu] *sizeof(real)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for v_off_d"); } // end if // ///////////////////////////////////////////////////////////////////////// printf("In GPU: %d\n",gpu); if (meanNnzPerRow0[gpu] < 10 && parameter2Adjust*sd0[gpu] < warpSize) { // these mean use scalar spmv if (meanNnzPerRow0[gpu] < (real) 4.5) { 
block0[gpu].x=128; } else if (meanNnzPerRow0[gpu]< (real) 14.4) { block0[gpu].x=64; } else { block0[gpu].x=32; } // end if // grid0[gpu].x = ( (n[gpu] + block0[gpu].x -1) /block0[gpu].x ); printf("using scalar spmv for on matrix, blockSize: [%d, %d] %f, %f\n",block0[gpu].x,block0[gpu].y, meanNnzPerRow0[gpu], sd0[gpu]) ; } else { // these mean use vector spmv if (meanNnzPerRow0[gpu] > 10.0*warpSize) { block0[gpu].x=2*warpSize; } else if (meanNnzPerRow0[gpu] > 4.0*warpSize) { block0[gpu].x=warpSize/2; } else { block0[gpu].x=warpSize/4; } // end if // block0[gpu].y=MAXTHREADS/block0[gpu].x; grid0[gpu].x = ( (n[gpu] + block0[gpu].y - 1) / block0[gpu].y ) ; sharedMemorySize0[gpu]=block0[gpu].x*block0[gpu].y*sizeof(real); printf("using vector spmv for on matrix, blockSize: [%d, %d] %f, %f\n",block0[gpu].x,block0[gpu].y, meanNnzPerRow0[gpu], sd0[gpu]) ; } // end if // if (ngpus > 1) { if (meanNnzPerRow1[gpu] < 10 && parameter2Adjust*sd1[gpu] < warpSize) { // these mean use scalar spmv if (meanNnzPerRow1[gpu] < (real) 4.5) { block1[gpu].x=128; } else if (meanNnzPerRow1[gpu] < (real) 14.4) { block1[gpu].x=64; } else { block1[gpu].x=32; } // end if // grid1[gpu].x = ( (n[gpu] + block1[gpu].x -1) /block1[gpu].x ); printf("using scalar spmv for off matrix, blockSize: [%d, %d] %f, %f\n",block1[gpu].x,block1[gpu].y, meanNnzPerRow1[gpu], sd1[gpu]) ; } else { // these mean use vector spmv if (meanNnzPerRow1[gpu] > 10.0*warpSize) { block1[gpu].x=2*warpSize; } else if (meanNnzPerRow1[gpu] > 4.0*warpSize) { block1[gpu].x=warpSize/2; } else { block1[gpu].x=warpSize/4; } // end if // block1[gpu].y=MAXTHREADS/block1[gpu].x; grid1[gpu].x = ( (n[gpu] + block1[gpu].y - 1) / block1[gpu].y ) ; sharedMemorySize1[gpu]=block1[gpu].x*block1[gpu].y*sizeof(real); printf("using vector spmv for off matrix, blockSize: [%d, %d] %f, %f\n",block1[gpu].x,block1[gpu].y, meanNnzPerRow1[gpu], sd1[gpu]) ; } // end if // } } // end for // // Timing should begin here// struct timeval tp; // timer double 
elapsed_time; gettimeofday(&tp,NULL); // Unix timer elapsed_time = -(tp.tv_sec*1.0e6 + tp.tv_usec); for (int t=0; t<REP; ++t) { // send the first spmv for (int gpu=0; gpu<ngpus; ++gpu) { hipSetDevice(gpu); cuda_ret = hipMemsetAsync(w_d[gpu], 0, sizeof(real)*n[gpu],stream0[gpu] ); if(cuda_ret != hipSuccess) FATAL("Unable to set device for matrix w_d[gpu]"); cuda_ret = hipBindTexture(NULL, xTex, v_d[gpu], n[gpu] * sizeof(real)); cuda_ret = hipBindTexture(NULL, valTex, val_d[gpu], on_proc_nnz[gpu] * sizeof(real)); hipLaunchKernelGGL(( spmv), dim3(grid0[gpu]), dim3(block0[gpu]), sharedMemorySize0[gpu],stream0[gpu], w_d[gpu], row_ptr_d[gpu], col_idx_d[gpu], n[gpu]); cuda_ret = hipUnbindTexture(xTex); cuda_ret = hipUnbindTexture(valTex); } // end for // if (ngpus > 1 ) { startComunication(v,v_off,recvSendCount, sendColumns, &ngpus); // send the second spmv for (int gpu=0; gpu<ngpus; ++gpu) { hipSetDevice(gpu); cuda_ret = hipMemcpyAsync(v_off_d[gpu], v_off[gpu], nColsOff[gpu]*sizeof(real),hipMemcpyHostToDevice,stream1[gpu] ) ; if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device array v_off_d"); cuda_ret = hipBindTexture(NULL, xTex, v_off_d[gpu], nColsOff[gpu] * sizeof(real)); cuda_ret = hipBindTexture(NULL, valTex, val_off_d[gpu], off_proc_nnz[gpu] * sizeof(real)); hipLaunchKernelGGL(( spmv), dim3(grid1[gpu]), dim3(block1[gpu]), sharedMemorySize1[gpu],stream1[gpu] , w_d[gpu], row_ptr_off_d[gpu], col_idx_off_d[gpu], n[gpu]); cuda_ret = hipUnbindTexture(xTex); cuda_ret = hipUnbindTexture(valTex); } // end for // } // end if // for (int gpu=0; gpu<ngpus; ++gpu) { hipSetDevice(gpu); //hipStreamSynchronize(NULL); hipStreamSynchronize(stream0[gpu]); hipStreamSynchronize(stream1[gpu]); } // end for // } // end for // gettimeofday(&tp,NULL); elapsed_time += (tp.tv_sec*1.0e6 + tp.tv_usec); printf ("Total time was %f seconds, GFLOPS: %f\n", elapsed_time*1.0e-6, 2.0*totalNNZ*REP*1.0e-3/elapsed_time ); for (int gpu=0; gpu<ngpus; ++gpu) { hipSetDevice(gpu); cuda_ret = 
hipMemcpy(w[gpu], w_d[gpu], n[gpu]*sizeof(real),hipMemcpyDeviceToHost); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix y_d back to host"); } // end for // if (checkSol=='t') { for (int gpu=0; gpu<ngpus; ++gpu) { real *sol= (real *) malloc( n[gpu] * sizeof(real)); hipSetDevice(gpu); // reading input vector vectorReader(sol, &gpu, n, argv[3]); int row=0; real tolerance = 1.0e-08; if (sizeof(real) != sizeof(double) ) { tolerance = 1.0e-02; } // end if // real error; do { error = fabs(sol[row] - w[gpu][row]) /fabs(sol[row]); if ( error > tolerance ) break; ++row; } while (row < n[gpu]); // end do-while // if (row == n[gpu]) { printf("Solution match in gpu %d\n",gpu); } else { printf("For Matrix %s, solution does not match at element %d in gpu %d %20.13e --> %20.13e error -> %20.13e, tolerance: %20.13e \n", argv[1], (row+1),gpu, sol[row], w[gpu][row], error , tolerance ); } // end if // free(sol); } // end for // } // end if // #include "parallelSpmvCleanData.h" return 0; // printf("file: %s, line: %d, so far so good\n", __FILE__, __LINE__ ); exit(0); } // end main() //
46da2204f9174e601dbc8c206d2c51bd0bff9418.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #include "real.h" #include "dataDef.h" #include "parallelSpmv.h" #define MAXTHREADS 128 #define REP 1000 #ifdef DOUBLE texture<int2> xTex; texture<int2> valTex; #else texture<float> xTex; texture<float> valTex; #endif void meanAndSd(real *mean, real *sd,real *data, int n) { real sum = (real) 0.0; real standardDeviation = (real) 0.0; for(int i=0; i<n; ++i) { sum += data[i]; } // end for // *mean = sum/n; for(int i=0; i<n; ++i) { standardDeviation += pow(data[i] - *mean, 2); } // end for // *sd=sqrt(standardDeviation/n); } // end of calculateSD // int main(int argc, char *argv[]) { #include "parallelSpmvData.h" cudaError_t cuda_ret; cuda_ret = cudaGetDeviceCount(&ngpus); if(cuda_ret != cudaSuccess) FATAL("Unable to deternine number of GPUs"); //ngpus=4; // verifing number of input parameters // char exists='t'; char checkSol='f'; if (argc < 3 ) { printf("Use: %s Matrix_filename InputVector_filename [SolutionVector_filename] \n", argv[0]); exists='f'; } // endif // FILE *fh=NULL; // testing if matrix file exists if((fh = fopen(argv[1], "rb") ) == NULL) { printf("No matrix file found.\n"); exists='f'; } // end if // // testing if input file exists if((fh = fopen(argv[2], "rb") ) == NULL) { printf("No input vector file found.\n"); exists='f'; } // end if // // testing if output file exists if (argc >3 ) { if((fh = fopen(argv[3], "rb") ) == NULL) { printf("No output vector file found.\n"); exists='f'; } else { checkSol='t'; } // end if // } // end if // if (fh) fclose(fh); if (exists == 'f') { printf("Quitting.....\n"); exit(0); } // end if // printf("%s Precision. Solving using %d %s\n", (sizeof(real) == sizeof(double)) ? "Double": "Single", ngpus, (ngpus > 1) ? 
"GPUs": "GPU" ); #include "parallelSpmvAllocateData.h" reader(&n, on_proc_nnz, off_proc_nnz, &row_ptr,&col_idx,&val, &row_ptr_off,&col_idx_off,&val_off, argv[1], ngpus); if (ngpus>1) { createCommunicator(nColsOff, sendColumns, recvSendCount , col_idx_off, off_proc_nnz, n,&ngpus); } // end if // // ready to start // int totalNNZ=0; for (int gpu=0; gpu<ngpus; ++gpu) { totalNNZ+=on_proc_nnz[gpu]; totalNNZ+=off_proc_nnz[gpu]; cuda_ret = cudaSetDevice(gpu); if(cuda_ret != cudaSuccess) FATAL("Unable to set gpu"); //cuda_ret = cudaStreamCreateWithFlags(&stream0[gpu], cudaStreamDefault); cuda_ret = cudaStreamCreateWithFlags(&stream0[gpu], cudaStreamNonBlocking ) ; if(cuda_ret != cudaSuccess) FATAL("Unable to create stream0"); cuda_ret = cudaStreamCreateWithFlags(&stream1[gpu], cudaStreamNonBlocking ) ; if(cuda_ret != cudaSuccess) FATAL("Unable to create stream1"); v[gpu] = (real *) malloc((n[gpu])*sizeof(real)); w[gpu] = (real *) malloc((n[gpu])*sizeof(real)); vectorReader(v[gpu], &gpu, n, argv[2]); if (ngpus > 1) cudaHostAlloc((real **)&v_off[gpu] , nColsOff[gpu]*sizeof(real),cudaHostAllocDefault); ///////////////////////////////////////////////////// // determining the standard deviation of the nnz per row real *temp=(real *) malloc((n[gpu])*sizeof(real)); for (int row=0; row<n[gpu]; ++row) { temp[row] = row_ptr[gpu][row+1] - row_ptr[gpu][row]; } // end for // meanAndSd(&meanNnzPerRow0[gpu],&sd0[gpu],temp,n[gpu]); //printf("file: %s, line: %d, gpu on-prcoc: %d, mean: %7.3f, sd: %7.3f using: %s \n", __FILE__, __LINE__, gpu , meanNnzPerRow0[gpu], sd0[gpu], (meanNnzPerRow0[gpu] + 0.5*sd0[gpu] < 32) ? 
"spmv0": "spmv1"); if (nColsOff[gpu]) { for (int row=0; row<n[gpu]; ++row) { temp[row] = row_ptr_off[gpu][row+1] - row_ptr_off[gpu][row]; } // end for // meanAndSd(&meanNnzPerRow1[gpu],&sd1[gpu],temp,n[gpu]); //printf("file: %s, line: %d, gpu off-prcoc: %d, mean: %7.3f, sd: %7.3f using: %s \n", __FILE__, __LINE__, gpu , meanNnzPerRow1[gpu], sd1[gpu], (meanNnzPerRow1[gpu] + 0.5*sd1[gpu] < 32) ? "spmv0": "spmv1"); } // end if // free(temp); ///////////////////////////////////////////////////// cudaSetDevice(gpu); //printf("file: %s, line: %d, setting gpu: %d\n", __FILE__, __LINE__,gpu); // Allocating device memory for on_proc input matrices cuda_ret = cudaMalloc((void **) &row_ptr_d[gpu], (n[gpu]+1) * sizeof(int) ); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for rows_d"); cuda_ret = cudaMalloc((void **) &col_idx_d[gpu], on_proc_nnz[gpu] * sizeof(int)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for cols_d"); cuda_ret = cudaMalloc((void **) &val_d[gpu], on_proc_nnz[gpu] * sizeof(real)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for vals_d"); // Copy the input on_proc matrices from the host memory to the device memory cuda_ret = cudaMemcpy(row_ptr_d[gpu], row_ptr[gpu], (n[gpu]+1)*sizeof(int),cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix row_ptr_d"); cuda_ret = cudaMemcpy(col_idx_d[gpu], col_idx[gpu], on_proc_nnz[gpu]*sizeof(int),cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix col_idx_d"); cuda_ret = cudaMemcpy(val_d[gpu], val[gpu], on_proc_nnz[gpu]*sizeof(real),cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix val_d"); // Allocating device memory for inpit and output vectors cuda_ret = cudaMalloc((void **) &(w_d[gpu]), n[gpu]*sizeof(real)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for w_d"); cuda_ret = 
cudaMalloc((void **) &(v_d[gpu]), n[gpu]*sizeof(real)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for v_d"); // Copy the input vector to device cuda_ret = cudaMemcpy(v_d[gpu], v[gpu], n[gpu]*sizeof(real),cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix v_d"); if (ngpus > 1) { // Allocating device memory for off_proc input matrices cuda_ret = cudaMalloc((void **) &row_ptr_off_d[gpu], (n[gpu]+1)*sizeof(int)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for rows_d"); cuda_ret = cudaMalloc((void **) &col_idx_off_d[gpu], off_proc_nnz[gpu] * sizeof(int)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for cols_d"); cuda_ret = cudaMalloc((void **) &val_off_d[gpu], off_proc_nnz[gpu] *sizeof(real)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for vals_d"); // Copy the input off_proc matrices from the host memory to the device memory cuda_ret = cudaMemcpy(col_idx_off_d[gpu], col_idx_off[gpu], off_proc_nnz[gpu]*sizeof(int),cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix col_idx_d"); cuda_ret = cudaMemcpy(val_off_d[gpu] , val_off[gpu], off_proc_nnz[gpu]*sizeof(real),cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix val_d"); cuda_ret = cudaMemcpy(row_ptr_off_d[gpu], row_ptr_off[gpu], (n[gpu]+1)*sizeof(int),cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix row_ptr_d"); // Allocating device memory for inpit and output vectors cuda_ret = cudaMalloc((void **) &v_off_d[gpu], nColsOff[gpu] *sizeof(real)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for v_off_d"); } // end if // ///////////////////////////////////////////////////////////////////////// printf("In GPU: %d\n",gpu); if (meanNnzPerRow0[gpu] < 10 && parameter2Adjust*sd0[gpu] < warpSize) { // these mean use 
scalar spmv if (meanNnzPerRow0[gpu] < (real) 4.5) { block0[gpu].x=128; } else if (meanNnzPerRow0[gpu]< (real) 14.4) { block0[gpu].x=64; } else { block0[gpu].x=32; } // end if // grid0[gpu].x = ( (n[gpu] + block0[gpu].x -1) /block0[gpu].x ); printf("using scalar spmv for on matrix, blockSize: [%d, %d] %f, %f\n",block0[gpu].x,block0[gpu].y, meanNnzPerRow0[gpu], sd0[gpu]) ; } else { // these mean use vector spmv if (meanNnzPerRow0[gpu] > 10.0*warpSize) { block0[gpu].x=2*warpSize; } else if (meanNnzPerRow0[gpu] > 4.0*warpSize) { block0[gpu].x=warpSize/2; } else { block0[gpu].x=warpSize/4; } // end if // block0[gpu].y=MAXTHREADS/block0[gpu].x; grid0[gpu].x = ( (n[gpu] + block0[gpu].y - 1) / block0[gpu].y ) ; sharedMemorySize0[gpu]=block0[gpu].x*block0[gpu].y*sizeof(real); printf("using vector spmv for on matrix, blockSize: [%d, %d] %f, %f\n",block0[gpu].x,block0[gpu].y, meanNnzPerRow0[gpu], sd0[gpu]) ; } // end if // if (ngpus > 1) { if (meanNnzPerRow1[gpu] < 10 && parameter2Adjust*sd1[gpu] < warpSize) { // these mean use scalar spmv if (meanNnzPerRow1[gpu] < (real) 4.5) { block1[gpu].x=128; } else if (meanNnzPerRow1[gpu] < (real) 14.4) { block1[gpu].x=64; } else { block1[gpu].x=32; } // end if // grid1[gpu].x = ( (n[gpu] + block1[gpu].x -1) /block1[gpu].x ); printf("using scalar spmv for off matrix, blockSize: [%d, %d] %f, %f\n",block1[gpu].x,block1[gpu].y, meanNnzPerRow1[gpu], sd1[gpu]) ; } else { // these mean use vector spmv if (meanNnzPerRow1[gpu] > 10.0*warpSize) { block1[gpu].x=2*warpSize; } else if (meanNnzPerRow1[gpu] > 4.0*warpSize) { block1[gpu].x=warpSize/2; } else { block1[gpu].x=warpSize/4; } // end if // block1[gpu].y=MAXTHREADS/block1[gpu].x; grid1[gpu].x = ( (n[gpu] + block1[gpu].y - 1) / block1[gpu].y ) ; sharedMemorySize1[gpu]=block1[gpu].x*block1[gpu].y*sizeof(real); printf("using vector spmv for off matrix, blockSize: [%d, %d] %f, %f\n",block1[gpu].x,block1[gpu].y, meanNnzPerRow1[gpu], sd1[gpu]) ; } // end if // } } // end for // // Timing should 
begin here// struct timeval tp; // timer double elapsed_time; gettimeofday(&tp,NULL); // Unix timer elapsed_time = -(tp.tv_sec*1.0e6 + tp.tv_usec); for (int t=0; t<REP; ++t) { // send the first spmv for (int gpu=0; gpu<ngpus; ++gpu) { cudaSetDevice(gpu); cuda_ret = cudaMemsetAsync(w_d[gpu], 0, sizeof(real)*n[gpu],stream0[gpu] ); if(cuda_ret != cudaSuccess) FATAL("Unable to set device for matrix w_d[gpu]"); cuda_ret = cudaBindTexture(NULL, xTex, v_d[gpu], n[gpu] * sizeof(real)); cuda_ret = cudaBindTexture(NULL, valTex, val_d[gpu], on_proc_nnz[gpu] * sizeof(real)); spmv<<<grid0[gpu], block0[gpu], sharedMemorySize0[gpu],stream0[gpu]>>>(w_d[gpu], row_ptr_d[gpu], col_idx_d[gpu], n[gpu]); cuda_ret = cudaUnbindTexture(xTex); cuda_ret = cudaUnbindTexture(valTex); } // end for // if (ngpus > 1 ) { startComunication(v,v_off,recvSendCount, sendColumns, &ngpus); // send the second spmv for (int gpu=0; gpu<ngpus; ++gpu) { cudaSetDevice(gpu); cuda_ret = cudaMemcpyAsync(v_off_d[gpu], v_off[gpu], nColsOff[gpu]*sizeof(real),cudaMemcpyHostToDevice,stream1[gpu] ) ; if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device array v_off_d"); cuda_ret = cudaBindTexture(NULL, xTex, v_off_d[gpu], nColsOff[gpu] * sizeof(real)); cuda_ret = cudaBindTexture(NULL, valTex, val_off_d[gpu], off_proc_nnz[gpu] * sizeof(real)); spmv<<<grid1[gpu], block1[gpu], sharedMemorySize1[gpu],stream1[gpu] >>>(w_d[gpu], row_ptr_off_d[gpu], col_idx_off_d[gpu], n[gpu]); cuda_ret = cudaUnbindTexture(xTex); cuda_ret = cudaUnbindTexture(valTex); } // end for // } // end if // for (int gpu=0; gpu<ngpus; ++gpu) { cudaSetDevice(gpu); //cudaStreamSynchronize(NULL); cudaStreamSynchronize(stream0[gpu]); cudaStreamSynchronize(stream1[gpu]); } // end for // } // end for // gettimeofday(&tp,NULL); elapsed_time += (tp.tv_sec*1.0e6 + tp.tv_usec); printf ("Total time was %f seconds, GFLOPS: %f\n", elapsed_time*1.0e-6, 2.0*totalNNZ*REP*1.0e-3/elapsed_time ); for (int gpu=0; gpu<ngpus; ++gpu) { cudaSetDevice(gpu); 
cuda_ret = cudaMemcpy(w[gpu], w_d[gpu], n[gpu]*sizeof(real),cudaMemcpyDeviceToHost); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix y_d back to host"); } // end for // if (checkSol=='t') { for (int gpu=0; gpu<ngpus; ++gpu) { real *sol= (real *) malloc( n[gpu] * sizeof(real)); cudaSetDevice(gpu); // reading input vector vectorReader(sol, &gpu, n, argv[3]); int row=0; real tolerance = 1.0e-08; if (sizeof(real) != sizeof(double) ) { tolerance = 1.0e-02; } // end if // real error; do { error = fabs(sol[row] - w[gpu][row]) /fabs(sol[row]); if ( error > tolerance ) break; ++row; } while (row < n[gpu]); // end do-while // if (row == n[gpu]) { printf("Solution match in gpu %d\n",gpu); } else { printf("For Matrix %s, solution does not match at element %d in gpu %d %20.13e --> %20.13e error -> %20.13e, tolerance: %20.13e \n", argv[1], (row+1),gpu, sol[row], w[gpu][row], error , tolerance ); } // end if // free(sol); } // end for // } // end if // #include "parallelSpmvCleanData.h" return 0; // printf("file: %s, line: %d, so far so good\n", __FILE__, __LINE__ ); exit(0); } // end main() //
71a1630e3fc2e4765e7438642749886d70b794bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // SDSC Summer Institute 2018 // Andreas Goetz (agoetz@sdsc.edu) // Hello World Program in CUDA C // // Contains a function that is executed on the device (GPU) // #include<stdio.h> __global__ void my_kernel(void){ } int main(void) { hipLaunchKernelGGL(( my_kernel), dim3(1),dim3(1), 0, 0, ); printf("Hello World!\n"); return 0; }
71a1630e3fc2e4765e7438642749886d70b794bf.cu
// SDSC Summer Institute 2018 // Andreas Goetz (agoetz@sdsc.edu) // Hello World Program in CUDA C // // Contains a function that is executed on the device (GPU) // #include<stdio.h> __global__ void my_kernel(void){ } int main(void) { my_kernel<<<1,1>>>(); printf("Hello World!\n"); return 0; }
f4ff64cbb68f0b5b86b6d8f152e0df413ba7d890.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define SIZ 20 #define num_inp 4 using namespace std; typedef struct edge { int first, second; } edges; __global__ void dhidden_cal_kernel(double * a1,double * dhidden,int size) { int i = blockIdx.x; int j = threadIdx.x; if (a1[i*size + j] <= 0) { dhidden[i*size + j] = 0; } }
f4ff64cbb68f0b5b86b6d8f152e0df413ba7d890.cu
#include "includes.h" #define SIZ 20 #define num_inp 4 using namespace std; typedef struct edge { int first, second; } edges; __global__ void dhidden_cal_kernel(double * a1,double * dhidden,int size) { int i = blockIdx.x; int j = threadIdx.x; if (a1[i*size + j] <= 0) { dhidden[i*size + j] = 0; } }
98a472baa01720122deb576d833759156c12de4f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cugraph/legacy/graph.hpp> #include <cugraph/utilities/error.hpp> #include <raft/lap/lap.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <thrust/for_each.h> #include <thrust/random.h> #include <iostream> #include <limits> //#define TIMING #ifdef TIMING #include <utilities/high_res_timer.hpp> #endif namespace cugraph { namespace detail { template <typename weight_t> weight_t default_epsilon() { return 0; } template <> float default_epsilon() { return float{1e-6}; } template <> double default_epsilon() { return double{1e-6}; } template <typename index_t, typename weight_t> weight_t hungarian(raft::handle_t const& handle, index_t num_rows, index_t num_cols, weight_t const* d_original_cost, index_t* d_assignment, weight_t epsilon) { if (num_rows == num_cols) { rmm::device_uvector<index_t> col_assignments_v(num_rows, handle.get_stream_view()); // Create an instance of LinearAssignmentProblem using problem size, number of subproblems raft::lap::LinearAssignmentProblem<index_t, weight_t> lpx(handle, num_rows, 1, epsilon); // Solve LAP(s) for given cost matrix lpx.solve(d_original_cost, d_assignment, col_assignments_v.data()); return lpx.getPrimalObjectiveValue(0); } else { // // Create a square matrix, copy d_original_cost into it. 
// Fill the extra rows/columns with max(d_original_cost) // index_t n = ::max(num_rows, num_cols); weight_t max_cost = thrust::reduce(handle.get_thrust_policy(), d_original_cost, d_original_cost + (num_rows * num_cols), weight_t{0}, thrust::maximum<weight_t>()); rmm::device_uvector<weight_t> tmp_cost_v(n * n, handle.get_stream_view()); rmm::device_uvector<index_t> tmp_row_assignment_v(n, handle.get_stream_view()); rmm::device_uvector<index_t> tmp_col_assignment_v(n, handle.get_stream_view()); thrust::transform(handle.get_thrust_policy(), thrust::make_counting_iterator<index_t>(0), thrust::make_counting_iterator<index_t>(n * n), tmp_cost_v.begin(), [max_cost, d_original_cost, n, num_rows, num_cols] __device__(index_t i) { index_t row = i / n; index_t col = i % n; return ((row < num_rows) && (col < num_cols)) ? d_original_cost[row * num_cols + col] : max_cost; }); raft::lap::LinearAssignmentProblem<index_t, weight_t> lpx(handle, n, 1, epsilon); // Solve LAP(s) for given cost matrix lpx.solve(tmp_cost_v.begin(), tmp_row_assignment_v.begin(), tmp_col_assignment_v.begin()); weight_t tmp_objective_value = lpx.getPrimalObjectiveValue(0); raft::copy(d_assignment, tmp_row_assignment_v.begin(), num_rows, handle.get_stream()); return tmp_objective_value - max_cost * std::abs(num_rows - num_cols); } } template <typename vertex_t, typename edge_t, typename weight_t> weight_t hungarian_sparse(raft::handle_t const& handle, legacy::GraphCOOView<vertex_t, edge_t, weight_t> const& graph, vertex_t num_workers, vertex_t const* workers, vertex_t* assignment, weight_t epsilon) { CUGRAPH_EXPECTS(assignment != nullptr, "Invalid input argument: assignment pointer is NULL"); CUGRAPH_EXPECTS(graph.edge_data != nullptr, "Invalid input argument: graph must have edge data (costs)"); #ifdef TIMING HighResTimer hr_timer; hr_timer.start("prep"); #endif // // Translate sparse matrix into dense bipartite matrix. 
// rows are the workers, columns are the tasks // vertex_t num_rows = num_workers; vertex_t num_cols = graph.number_of_vertices - num_rows; vertex_t matrix_dimension = ::max(num_rows, num_cols); rmm::device_uvector<weight_t> cost_v(matrix_dimension * matrix_dimension, handle.get_stream_view()); rmm::device_uvector<vertex_t> tasks_v(num_cols, handle.get_stream_view()); rmm::device_uvector<vertex_t> temp_tasks_v(graph.number_of_vertices, handle.get_stream_view()); rmm::device_uvector<vertex_t> temp_workers_v(graph.number_of_vertices, handle.get_stream_view()); weight_t* d_cost = cost_v.data(); vertex_t* d_tasks = tasks_v.data(); vertex_t* d_temp_tasks = temp_tasks_v.data(); vertex_t* d_temp_workers = temp_workers_v.data(); vertex_t* d_src_indices = graph.src_indices; vertex_t* d_dst_indices = graph.dst_indices; weight_t* d_edge_data = graph.edge_data; // // Renumber vertices internally. Workers will become // rows, tasks will become columns // thrust::sequence(handle.get_thrust_policy(), temp_tasks_v.begin(), temp_tasks_v.end()); thrust::for_each(handle.get_thrust_policy(), workers, workers + num_workers, [d_temp_tasks] __device__(vertex_t v) { d_temp_tasks[v] = -1; }); auto temp_end = thrust::copy_if(handle.get_thrust_policy(), temp_tasks_v.begin(), temp_tasks_v.end(), d_tasks, [] __device__(vertex_t v) { return v >= 0; }); vertex_t size = thrust::distance(d_tasks, temp_end); tasks_v.resize(size, handle.get_stream_view()); // // Now we'll assign costs into the dense array // thrust::fill( handle.get_thrust_policy(), temp_workers_v.begin(), temp_workers_v.end(), vertex_t{-1}); thrust::fill(handle.get_thrust_policy(), temp_tasks_v.begin(), temp_tasks_v.end(), vertex_t{-1}); thrust::fill(handle.get_thrust_policy(), cost_v.begin(), cost_v.end(), weight_t{0}); thrust::for_each( handle.get_thrust_policy(), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_rows), [d_temp_workers, workers] __device__(vertex_t v) { 
d_temp_workers[workers[v]] = v; }); thrust::for_each( handle.get_thrust_policy(), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_cols), [d_temp_tasks, d_tasks] __device__(vertex_t v) { d_temp_tasks[d_tasks[v]] = v; }); thrust::for_each(handle.get_thrust_policy(), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [d_temp_workers, d_temp_tasks, d_cost, matrix_dimension, d_src_indices, d_dst_indices, d_edge_data] __device__(edge_t loc) { vertex_t src = d_temp_workers[d_src_indices[loc]]; vertex_t dst = d_temp_tasks[d_dst_indices[loc]]; if ((src >= 0) && (dst >= 0)) { d_cost[src * matrix_dimension + dst] = d_edge_data[loc]; } }); #ifdef TIMING hr_timer.stop(); hr_timer.start("hungarian"); #endif // // temp_assignment_v will hold the assignment in the dense // bipartite matrix numbering // rmm::device_uvector<vertex_t> temp_assignment_v(matrix_dimension, handle.get_stream_view()); vertex_t* d_temp_assignment = temp_assignment_v.data(); weight_t min_cost = detail::hungarian( handle, matrix_dimension, matrix_dimension, d_cost, d_temp_assignment, epsilon); #ifdef TIMING hr_timer.stop(); hr_timer.start("translate"); #endif // // Translate the assignment back to the original vertex ids // thrust::for_each(handle.get_thrust_policy(), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_rows), [d_tasks, d_temp_assignment, assignment] __device__(vertex_t id) { assignment[id] = d_tasks[d_temp_assignment[id]]; }); #ifdef TIMING hr_timer.stop(); hr_timer.display(std::cout); #endif return min_cost; } } // namespace detail template <typename vertex_t, typename edge_t, typename weight_t> weight_t hungarian(raft::handle_t const& handle, legacy::GraphCOOView<vertex_t, edge_t, weight_t> const& graph, vertex_t num_workers, vertex_t const* workers, vertex_t* assignment) { return detail::hungarian_sparse( handle, graph, num_workers, workers, assignment, 
detail::default_epsilon<weight_t>()); } template <typename vertex_t, typename edge_t, typename weight_t> weight_t hungarian(raft::handle_t const& handle, legacy::GraphCOOView<vertex_t, edge_t, weight_t> const& graph, vertex_t num_workers, vertex_t const* workers, vertex_t* assignment, weight_t epsilon) { return detail::hungarian_sparse(handle, graph, num_workers, workers, assignment, epsilon); } template int32_t hungarian<int32_t, int32_t, int32_t>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, int32_t> const&, int32_t, int32_t const*, int32_t*, int32_t); template float hungarian<int32_t, int32_t, float>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, float> const&, int32_t, int32_t const*, int32_t*, float); template double hungarian<int32_t, int32_t, double>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, double> const&, int32_t, int32_t const*, int32_t*, double); template int32_t hungarian<int32_t, int32_t, int32_t>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, int32_t> const&, int32_t, int32_t const*, int32_t*); template float hungarian<int32_t, int32_t, float>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, float> const&, int32_t, int32_t const*, int32_t*); template double hungarian<int32_t, int32_t, double>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, double> const&, int32_t, int32_t const*, int32_t*); namespace dense { template <typename index_t, typename weight_t> weight_t hungarian(raft::handle_t const& handle, weight_t const* costs, index_t num_rows, index_t num_cols, index_t* assignment) { return detail::hungarian( handle, num_rows, num_cols, costs, assignment, detail::default_epsilon<weight_t>()); } template <typename index_t, typename weight_t> weight_t hungarian(raft::handle_t const& handle, weight_t const* costs, index_t num_rows, index_t num_cols, index_t* assignment, weight_t epsilon) { return detail::hungarian(handle, num_rows, num_cols, costs, 
assignment, epsilon); } template int32_t hungarian<int32_t, int32_t>( raft::handle_t const&, int32_t const*, int32_t, int32_t, int32_t*); template float hungarian<int32_t, float>( raft::handle_t const&, float const*, int32_t, int32_t, int32_t*); template double hungarian<int32_t, double>( raft::handle_t const&, double const*, int32_t, int32_t, int32_t*); template int32_t hungarian<int32_t, int32_t>( raft::handle_t const&, int32_t const*, int32_t, int32_t, int32_t*, int32_t); template float hungarian<int32_t, float>( raft::handle_t const&, float const*, int32_t, int32_t, int32_t*, float); template double hungarian<int32_t, double>( raft::handle_t const&, double const*, int32_t, int32_t, int32_t*, double); } // namespace dense } // namespace cugraph
98a472baa01720122deb576d833759156c12de4f.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cugraph/legacy/graph.hpp> #include <cugraph/utilities/error.hpp> #include <raft/lap/lap.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <thrust/for_each.h> #include <thrust/random.h> #include <iostream> #include <limits> //#define TIMING #ifdef TIMING #include <utilities/high_res_timer.hpp> #endif namespace cugraph { namespace detail { template <typename weight_t> weight_t default_epsilon() { return 0; } template <> float default_epsilon() { return float{1e-6}; } template <> double default_epsilon() { return double{1e-6}; } template <typename index_t, typename weight_t> weight_t hungarian(raft::handle_t const& handle, index_t num_rows, index_t num_cols, weight_t const* d_original_cost, index_t* d_assignment, weight_t epsilon) { if (num_rows == num_cols) { rmm::device_uvector<index_t> col_assignments_v(num_rows, handle.get_stream_view()); // Create an instance of LinearAssignmentProblem using problem size, number of subproblems raft::lap::LinearAssignmentProblem<index_t, weight_t> lpx(handle, num_rows, 1, epsilon); // Solve LAP(s) for given cost matrix lpx.solve(d_original_cost, d_assignment, col_assignments_v.data()); return lpx.getPrimalObjectiveValue(0); } else { // // Create a square matrix, copy d_original_cost into it. 
// Fill the extra rows/columns with max(d_original_cost) // index_t n = std::max(num_rows, num_cols); weight_t max_cost = thrust::reduce(handle.get_thrust_policy(), d_original_cost, d_original_cost + (num_rows * num_cols), weight_t{0}, thrust::maximum<weight_t>()); rmm::device_uvector<weight_t> tmp_cost_v(n * n, handle.get_stream_view()); rmm::device_uvector<index_t> tmp_row_assignment_v(n, handle.get_stream_view()); rmm::device_uvector<index_t> tmp_col_assignment_v(n, handle.get_stream_view()); thrust::transform(handle.get_thrust_policy(), thrust::make_counting_iterator<index_t>(0), thrust::make_counting_iterator<index_t>(n * n), tmp_cost_v.begin(), [max_cost, d_original_cost, n, num_rows, num_cols] __device__(index_t i) { index_t row = i / n; index_t col = i % n; return ((row < num_rows) && (col < num_cols)) ? d_original_cost[row * num_cols + col] : max_cost; }); raft::lap::LinearAssignmentProblem<index_t, weight_t> lpx(handle, n, 1, epsilon); // Solve LAP(s) for given cost matrix lpx.solve(tmp_cost_v.begin(), tmp_row_assignment_v.begin(), tmp_col_assignment_v.begin()); weight_t tmp_objective_value = lpx.getPrimalObjectiveValue(0); raft::copy(d_assignment, tmp_row_assignment_v.begin(), num_rows, handle.get_stream()); return tmp_objective_value - max_cost * std::abs(num_rows - num_cols); } } template <typename vertex_t, typename edge_t, typename weight_t> weight_t hungarian_sparse(raft::handle_t const& handle, legacy::GraphCOOView<vertex_t, edge_t, weight_t> const& graph, vertex_t num_workers, vertex_t const* workers, vertex_t* assignment, weight_t epsilon) { CUGRAPH_EXPECTS(assignment != nullptr, "Invalid input argument: assignment pointer is NULL"); CUGRAPH_EXPECTS(graph.edge_data != nullptr, "Invalid input argument: graph must have edge data (costs)"); #ifdef TIMING HighResTimer hr_timer; hr_timer.start("prep"); #endif // // Translate sparse matrix into dense bipartite matrix. 
// rows are the workers, columns are the tasks // vertex_t num_rows = num_workers; vertex_t num_cols = graph.number_of_vertices - num_rows; vertex_t matrix_dimension = std::max(num_rows, num_cols); rmm::device_uvector<weight_t> cost_v(matrix_dimension * matrix_dimension, handle.get_stream_view()); rmm::device_uvector<vertex_t> tasks_v(num_cols, handle.get_stream_view()); rmm::device_uvector<vertex_t> temp_tasks_v(graph.number_of_vertices, handle.get_stream_view()); rmm::device_uvector<vertex_t> temp_workers_v(graph.number_of_vertices, handle.get_stream_view()); weight_t* d_cost = cost_v.data(); vertex_t* d_tasks = tasks_v.data(); vertex_t* d_temp_tasks = temp_tasks_v.data(); vertex_t* d_temp_workers = temp_workers_v.data(); vertex_t* d_src_indices = graph.src_indices; vertex_t* d_dst_indices = graph.dst_indices; weight_t* d_edge_data = graph.edge_data; // // Renumber vertices internally. Workers will become // rows, tasks will become columns // thrust::sequence(handle.get_thrust_policy(), temp_tasks_v.begin(), temp_tasks_v.end()); thrust::for_each(handle.get_thrust_policy(), workers, workers + num_workers, [d_temp_tasks] __device__(vertex_t v) { d_temp_tasks[v] = -1; }); auto temp_end = thrust::copy_if(handle.get_thrust_policy(), temp_tasks_v.begin(), temp_tasks_v.end(), d_tasks, [] __device__(vertex_t v) { return v >= 0; }); vertex_t size = thrust::distance(d_tasks, temp_end); tasks_v.resize(size, handle.get_stream_view()); // // Now we'll assign costs into the dense array // thrust::fill( handle.get_thrust_policy(), temp_workers_v.begin(), temp_workers_v.end(), vertex_t{-1}); thrust::fill(handle.get_thrust_policy(), temp_tasks_v.begin(), temp_tasks_v.end(), vertex_t{-1}); thrust::fill(handle.get_thrust_policy(), cost_v.begin(), cost_v.end(), weight_t{0}); thrust::for_each( handle.get_thrust_policy(), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_rows), [d_temp_workers, workers] __device__(vertex_t v) { 
d_temp_workers[workers[v]] = v; }); thrust::for_each( handle.get_thrust_policy(), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_cols), [d_temp_tasks, d_tasks] __device__(vertex_t v) { d_temp_tasks[d_tasks[v]] = v; }); thrust::for_each(handle.get_thrust_policy(), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [d_temp_workers, d_temp_tasks, d_cost, matrix_dimension, d_src_indices, d_dst_indices, d_edge_data] __device__(edge_t loc) { vertex_t src = d_temp_workers[d_src_indices[loc]]; vertex_t dst = d_temp_tasks[d_dst_indices[loc]]; if ((src >= 0) && (dst >= 0)) { d_cost[src * matrix_dimension + dst] = d_edge_data[loc]; } }); #ifdef TIMING hr_timer.stop(); hr_timer.start("hungarian"); #endif // // temp_assignment_v will hold the assignment in the dense // bipartite matrix numbering // rmm::device_uvector<vertex_t> temp_assignment_v(matrix_dimension, handle.get_stream_view()); vertex_t* d_temp_assignment = temp_assignment_v.data(); weight_t min_cost = detail::hungarian( handle, matrix_dimension, matrix_dimension, d_cost, d_temp_assignment, epsilon); #ifdef TIMING hr_timer.stop(); hr_timer.start("translate"); #endif // // Translate the assignment back to the original vertex ids // thrust::for_each(handle.get_thrust_policy(), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_rows), [d_tasks, d_temp_assignment, assignment] __device__(vertex_t id) { assignment[id] = d_tasks[d_temp_assignment[id]]; }); #ifdef TIMING hr_timer.stop(); hr_timer.display(std::cout); #endif return min_cost; } } // namespace detail template <typename vertex_t, typename edge_t, typename weight_t> weight_t hungarian(raft::handle_t const& handle, legacy::GraphCOOView<vertex_t, edge_t, weight_t> const& graph, vertex_t num_workers, vertex_t const* workers, vertex_t* assignment) { return detail::hungarian_sparse( handle, graph, num_workers, workers, assignment, 
detail::default_epsilon<weight_t>()); } template <typename vertex_t, typename edge_t, typename weight_t> weight_t hungarian(raft::handle_t const& handle, legacy::GraphCOOView<vertex_t, edge_t, weight_t> const& graph, vertex_t num_workers, vertex_t const* workers, vertex_t* assignment, weight_t epsilon) { return detail::hungarian_sparse(handle, graph, num_workers, workers, assignment, epsilon); } template int32_t hungarian<int32_t, int32_t, int32_t>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, int32_t> const&, int32_t, int32_t const*, int32_t*, int32_t); template float hungarian<int32_t, int32_t, float>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, float> const&, int32_t, int32_t const*, int32_t*, float); template double hungarian<int32_t, int32_t, double>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, double> const&, int32_t, int32_t const*, int32_t*, double); template int32_t hungarian<int32_t, int32_t, int32_t>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, int32_t> const&, int32_t, int32_t const*, int32_t*); template float hungarian<int32_t, int32_t, float>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, float> const&, int32_t, int32_t const*, int32_t*); template double hungarian<int32_t, int32_t, double>( raft::handle_t const&, legacy::GraphCOOView<int32_t, int32_t, double> const&, int32_t, int32_t const*, int32_t*); namespace dense { template <typename index_t, typename weight_t> weight_t hungarian(raft::handle_t const& handle, weight_t const* costs, index_t num_rows, index_t num_cols, index_t* assignment) { return detail::hungarian( handle, num_rows, num_cols, costs, assignment, detail::default_epsilon<weight_t>()); } template <typename index_t, typename weight_t> weight_t hungarian(raft::handle_t const& handle, weight_t const* costs, index_t num_rows, index_t num_cols, index_t* assignment, weight_t epsilon) { return detail::hungarian(handle, num_rows, num_cols, costs, 
assignment, epsilon); } template int32_t hungarian<int32_t, int32_t>( raft::handle_t const&, int32_t const*, int32_t, int32_t, int32_t*); template float hungarian<int32_t, float>( raft::handle_t const&, float const*, int32_t, int32_t, int32_t*); template double hungarian<int32_t, double>( raft::handle_t const&, double const*, int32_t, int32_t, int32_t*); template int32_t hungarian<int32_t, int32_t>( raft::handle_t const&, int32_t const*, int32_t, int32_t, int32_t*, int32_t); template float hungarian<int32_t, float>( raft::handle_t const&, float const*, int32_t, int32_t, int32_t*, float); template double hungarian<int32_t, double>( raft::handle_t const&, double const*, int32_t, int32_t, int32_t*, double); } // namespace dense } // namespace cugraph
e1af6d7f33dfc72765a12c1965526e29a82112b8.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 
12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <vector> #include <rocblas.h> #include <hip/hip_runtime.h> #include "cublas_utils.h" using data_type = double; int main(int argc, char *argv[]) { hipblasHandle_t cublasH = NULL; hipStream_t stream = NULL; /* * A = | 1.0 2.0 3.0 4.0 | */ std::vector<data_type> A = {1.0, 2.0, 3.0, 4.0}; const int incx = 1; data_type result = 0.0; data_type *d_A = nullptr; printf("A\n"); print_vector(A.size(), A.data()); printf("=====\n"); /* step 1: create cublas handle, bind a stream */ CUBLAS_CHECK(hipblasCreate(&cublasH)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); CUBLAS_CHECK(hipblasSetStream(cublasH, stream)); /* step 2: copy data to device */ CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size())); CUDA_CHECK(hipMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), hipMemcpyHostToDevice, stream)); /* step 3: compute */ CUBLAS_CHECK(hipblasDnrm2(cublasH, A.size(), d_A, incx, &result)); /* step 4: copy data to host */ CUDA_CHECK(hipMemcpyAsync(A.data(), d_A, sizeof(data_type) * A.size(), hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); /* * Result = 5.48 */ printf("Result\n"); printf("%0.2f\n", result); printf("=====\n"); /* free resources */ CUDA_CHECK(hipFree(d_A)); CUBLAS_CHECK(hipblasDestroy(cublasH)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
e1af6d7f33dfc72765a12c1965526e29a82112b8.cu
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. 
Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <vector> #include <cublas_v2.h> #include <cuda_runtime.h> #include "cublas_utils.h" using data_type = double; int main(int argc, char *argv[]) { cublasHandle_t cublasH = NULL; cudaStream_t stream = NULL; /* * A = | 1.0 2.0 3.0 4.0 | */ std::vector<data_type> A = {1.0, 2.0, 3.0, 4.0}; const int incx = 1; data_type result = 0.0; data_type *d_A = nullptr; printf("A\n"); print_vector(A.size(), A.data()); printf("=====\n"); /* step 1: create cublas handle, bind a stream */ CUBLAS_CHECK(cublasCreate(&cublasH)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); CUBLAS_CHECK(cublasSetStream(cublasH, stream)); /* step 2: copy data to device */ CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size())); CUDA_CHECK(cudaMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), cudaMemcpyHostToDevice, stream)); /* step 3: compute */ CUBLAS_CHECK(cublasDnrm2(cublasH, A.size(), d_A, incx, &result)); /* step 4: copy data to host */ CUDA_CHECK(cudaMemcpyAsync(A.data(), d_A, sizeof(data_type) * A.size(), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); /* * Result = 5.48 */ printf("Result\n"); printf("%0.2f\n", result); printf("=====\n"); /* free resources */ CUDA_CHECK(cudaFree(d_A)); CUBLAS_CHECK(cublasDestroy(cublasH)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
3b6366f82926f6eee002db199ae2d07715968809.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/cudnn/cudnn.hpp> #include <nbla/cuda/cudnn/function/lstm.hpp> #include <nbla/variable.hpp> #include <array> #include <random> namespace nbla { template <typename ARRAY> typename ARRAY::value_type array_product(const ARRAY &arr) { typename ARRAY::value_type p = 1; for (auto a : arr) { p *= a; } return p; } template <typename T> __global__ void kernel_forward_copy_weights(size_t size, const T *weight, T *param, int j_stride, int input_dim) { NBLA_CUDA_KERNEL_LOOP(i, size) { int stride; stride = (i / input_dim) * j_stride; param[i] = weight[i + stride]; } } template <typename T> __global__ void kernel_forward_copy_bias(size_t size, const T *bias, T *param) { NBLA_CUDA_KERNEL_LOOP(i, size) { param[i] = bias[i]; } } template <typename T> __global__ void kernel_accumulate_x_and_h(size_t size, const T *d_ptr, T *d) { NBLA_CUDA_KERNEL_LOOP(i, size) { d[i] += d_ptr[i]; } } template <typename T> __global__ void kernel_backward_copy_weights(size_t size, T *g_weight, T *g_param, int j_stride, int input_dim, bool accum) { NBLA_CUDA_KERNEL_LOOP(i, size) { int stride; stride = (i / input_dim) * j_stride; if (accum) { g_weight[i + stride] += g_param[i]; } else { g_weight[i 
+ stride] = g_param[i]; } } } template <typename T> __global__ void kernel_backward_copy_bias(size_t size, T *g_bias, T *g_param, bool accum) { NBLA_CUDA_KERNEL_LOOP(i, size) { if (accum) { g_bias[i] += g_param[i]; } else { g_bias[i] = g_param[i]; } } } template <typename T> void LSTMCudaCudnn<T>::copy_weight_bias_to_params( Tcu *params, const Tcu *w_init, const Tcu *weight, const Tcu *bias, bool weight_exists, bool bias_exists) { for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_; layer_id++) { for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_; lin_layer_id++) { int param_index = layer_id * num_lin_layers_ + lin_layer_id; int inweight_offset = 0; if (layer_id / num_directions_ == 0) { if (lin_layer_id < 4) { inweight_offset = layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ + lin_layer_id * hidden_size_ * (input_dim_ + hidden_size_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_forward_copy_weights<Tcu>), weight_offsets_[param_index].second, w_init + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), hidden_size_, input_dim_); } else { inweight_offset = layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ + (lin_layer_id - 4) * hidden_size_ * (input_dim_ + hidden_size_) + input_dim_; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_forward_copy_weights<Tcu>), weight_offsets_[param_index].second, w_init + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), input_dim_, hidden_size_) } } else { if (lin_layer_id < 4) { inweight_offset = (layer_id - num_directions_) * (num_directions_ * hidden_size_ + hidden_size_) * 4 * hidden_size_ + lin_layer_id * hidden_size_ * (num_directions_ * hidden_size_ + hidden_size_); if (this->num_layers_ > 1 && weight_exists) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_forward_copy_weights<Tcu>), weight_offsets_[param_index].second, weight + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), hidden_size_, num_directions_ * hidden_size_); } 
} else { inweight_offset = (layer_id - num_directions_) * (num_directions_ * hidden_size_ + hidden_size_) * 4 * hidden_size_ + (lin_layer_id - 4) * hidden_size_ * (num_directions_ * hidden_size_ + hidden_size_) + num_directions_ * hidden_size_; if (this->num_layers_ > 1 && weight_exists) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_forward_copy_weights<Tcu>), weight_offsets_[param_index].second, weight + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), num_directions_ * hidden_size_, hidden_size_); } } } if (bias_exists && bias && lin_layer_id < 4) { // copy only when lin_layer_id = 0, 1, 2, 3 NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_forward_copy_bias<Tcu>), bias_offsets_[param_index].second, bias + 4 * layer_id * hidden_size_ + lin_layer_id * hidden_size_, params + bias_offsets_[param_index].first / sizeof(T)); } } } } template <typename T> void LSTMCudaCudnn<T>::copy_params_to_gradients( Tcu *params, Tcu *w_init, Tcu *weight, Tcu *bias, bool w_init_accum, bool w_accum, bool b_accum, bool w_init_propagate, bool w_propagate, bool b_propagate) { for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_; layer_id++) { for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_; lin_layer_id++) { int param_index = layer_id * num_lin_layers_ + lin_layer_id; int inweight_offset = 0; if (layer_id / num_directions_ == 0) { if (lin_layer_id < 4) { inweight_offset = layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ + lin_layer_id * hidden_size_ * (input_dim_ + hidden_size_); if (w_init_propagate) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_backward_copy_weights<Tcu>), weight_offsets_[param_index].second, w_init + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), hidden_size_, input_dim_, w_init_accum); } } else { inweight_offset = layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ + (lin_layer_id - 4) * hidden_size_ * (input_dim_ + hidden_size_) + input_dim_; if (w_init_propagate) { 
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_backward_copy_weights<Tcu>), weight_offsets_[param_index].second, w_init + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), input_dim_, hidden_size_, w_init_accum); } } } else { if (lin_layer_id < 4) { inweight_offset = (layer_id - num_directions_) * (num_directions_ * hidden_size_ + hidden_size_) * 4 * hidden_size_ + lin_layer_id * hidden_size_ * (num_directions_ * hidden_size_ + hidden_size_); if (w_propagate) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_backward_copy_weights<Tcu>), weight_offsets_[param_index].second, weight + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), hidden_size_, num_directions_ * hidden_size_, w_accum); } } else { inweight_offset = (layer_id - num_directions_) * (num_directions_ * hidden_size_ + hidden_size_) * 4 * hidden_size_ + (lin_layer_id - 4) * hidden_size_ * (num_directions_ * hidden_size_ + hidden_size_) + num_directions_ * hidden_size_; if (w_propagate) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_backward_copy_weights<Tcu>), weight_offsets_[param_index].second, weight + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), num_directions_ * hidden_size_, hidden_size_, w_accum); } } } if (bias && b_propagate && lin_layer_id < 4) { // copy only when lin_layer_id = 0, 1, 2, 3 NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_backward_copy_bias<Tcu>), bias_offsets_[param_index].second, bias + 4 * layer_id * hidden_size_ + lin_layer_id * hidden_size_, params + bias_offsets_[param_index].first / sizeof(T), b_accum); } } } } template <typename T> void LSTMCudaCudnn<T>::setup_impl(const Variables &inputs, const Variables &outputs) { // Create x descriptors and y descriptors by resizing // Set descriptors cuda_set_device(this->device_); auto cudnn_handle = SingletonManager::get<CudnnHandleManager>()->handle(this->device_); Shape_t inshape = inputs[0]->shape(); Shape_t hshape = inputs[1]->shape(); Shape_t cshape = inputs[2]->shape(); Shape_t 
outshape = outputs[0]->shape(); // Check input dimensions NBLA_CHECK(inputs[0]->ndim() == 3, error_code::value, "Input x must be a 3 dimensional array with a shape of (steps, " "batch_size, input_size)."); // Get input dimensions cudnnDataType_t dt_ = cudnn_data_type<T>::type(); seq_len_ = inshape[0]; int batch_size = inshape[1]; input_dim_ = inshape[2]; // Assuming this function takes h as (numLayer, numD, B, M) hidden_size_ = inputs[1]->shape()[3]; inputMode = CUDNN_LINEAR_INPUT; num_directions_ = this->bidirectional_ ? 2 : 1; direction = this->bidirectional_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL; RNNMode = miopenLSTM; num_lin_layers_ = 8; // Check shape of h & c const char *error_msg_h = "Input h must be a 4 dimensional array with a " "shape of (num_layers, num_directions, batch_size, " "hidden_size)."; NBLA_CHECK(inputs[1]->ndim() == 4, error_code::value, error_msg_h); NBLA_CHECK(hshape[0] == this->num_layers_, error_code::value, error_msg_h); NBLA_CHECK(hshape[1] == num_directions_, error_code::value, error_msg_h); NBLA_CHECK(hshape[2] == batch_size, error_code::value, error_msg_h); NBLA_CHECK(hshape == cshape, error_code::value, "Input c must has the same shape as input h."); // Check weight shape at 0th layer Shape_t w0_shape = inputs[3]->shape(); const char *error_msg_w0 = "Input w0 must be a 4 dimensional array with a " "shape of (num_directions, 4, hidden_size, " "input_size + hidden_size)."; NBLA_CHECK(inputs[2]->ndim() == 4, error_code::value, error_msg_w0); NBLA_CHECK(w0_shape[0] == num_directions_, error_code::value, error_msg_w0); NBLA_CHECK(w0_shape[1] == 4, error_code::value, error_msg_w0); NBLA_CHECK(w0_shape[2] == hidden_size_, error_code::value, error_msg_w0); NBLA_CHECK(w0_shape[3] == hidden_size_ + input_dim_, error_code::value, error_msg_w0); weight_exists_ = true; bias_exists_ = true; if (inputs.size() == 4) { weight_exists_ = false; bias_exists_ = false; } else if (inputs.size() == 5) { Shape_t opt_shape = inputs[4]->shape(); if 
(this->num_layers_ > 1 && opt_shape.size() == 5) { bias_exists_ = false; } else if (this->num_layers_ > 1 && opt_shape.size() != 5) { NBLA_ERROR(error_code::value, "Weight argument must be passed when num_layers > 1"); } else if (this->num_layers_ == 1 && opt_shape.size() != 4) { NBLA_ERROR(error_code::value, "Weight argument cannot be passed when num_layers == 1"); } else if (this->num_layers_ == 1 && opt_shape.size() == 4) { weight_exists_ = false; } } else if ((inputs.size() > 5) && (this->num_layers_ == 1)) { NBLA_ERROR(error_code::value, "Weight argument cannot be passed when num_layers == 1"); } // Check weight shape if (weight_exists_) { Shape_t w_shape = inputs[4]->shape(); const char *error_msg_w = "Input w must be a 5 dimensional array with a " "shape of (num_layers - 1, num_directions, 4, " "hidden_size, num_directions * hidden_size + " "hidden_size)."; NBLA_CHECK(inputs[4]->ndim() == 5, error_code::value, error_msg_w); NBLA_CHECK(w_shape[0] == this->num_layers_ - 1, error_code::value, error_msg_w); NBLA_CHECK(w_shape[1] == num_directions_, error_code::value, error_msg_w); NBLA_CHECK(w_shape[2] == 4, error_code::value, error_msg_w); NBLA_CHECK(w_shape[3] == hidden_size_, error_code::value, error_msg_w); NBLA_CHECK(w_shape[4] == num_directions_ * hidden_size_ + hidden_size_, error_code::value, error_msg_w); } // Check bias shape if (bias_exists_) { const int b_index = weight_exists_ ? 
5 : 4; Shape_t b_shape = inputs[b_index]->shape(); const char *error_msg_b = "Input b must be a 4 dimensional array with a " "shape of (num_layers, 4, num_directions, " "hidden_size)."; NBLA_CHECK(inputs[b_index]->ndim() == 4, error_code::value, error_msg_b); NBLA_CHECK(b_shape[0] == this->num_layers_, error_code::value, error_msg_b); NBLA_CHECK(b_shape[1] == num_directions_, error_code::value, error_msg_b); NBLA_CHECK(b_shape[2] == 4, error_code::value, error_msg_b); NBLA_CHECK(b_shape[3] == hidden_size_, error_code::value, error_msg_b); } // Set X desc // xdesc : T * (B, N, 1) // x : (T, B, N) row-major x_desc_.reset(new WCudnnTensorDescArray(seq_len_)); for (auto &x : x_desc_->desc_array()) { std::array<int, 3> dimA{batch_size, input_dim_, 1}; std::array<int, 3> strideA{input_dim_, 1, 1}; NBLA_CUDNN_CHECK(cudnnSetTensorNdDescriptor(x, cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); } // Set hx and hy desc // hxDesc : (numLayer * numD, B, M) // hx : (numLayer, numD, B, M) row-major >>> or (numD, numLayer, B, M) // row-major // hyDesc : (numLayer * numD, B, M) // hy : (numLayer, numD, B, M) row-major >>> or (numD, numLayer, B, M) // row-major { std::array<int, 3> dimA{this->num_layers_ * num_directions_, batch_size, hidden_size_}; std::array<int, 3> strideA{batch_size * hidden_size_, hidden_size_, 1}; NBLA_CUDNN_CHECK( cudnnSetTensorNdDescriptor(h_desc_.desc, cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); NBLA_CUDNN_CHECK( cudnnSetTensorNdDescriptor(h_n_desc_.desc, cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); } // cx and cy { std::array<int, 3> dimA{this->num_layers_ * num_directions_, batch_size, hidden_size_}; std::array<int, 3> strideA{batch_size * hidden_size_, hidden_size_, 1}; NBLA_CUDNN_CHECK( cudnnSetTensorNdDescriptor(c_x_desc_.desc, cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); NBLA_CUDNN_CHECK( cudnnSetTensorNdDescriptor(c_y_desc_.desc, 
cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); } // Set Y desc // yDesc : T * (B, M * numD, 1) // y : (T, B, M, numD) row-major, >>> or (T, B, numD, M) y_desc_.reset(new WCudnnTensorDescArray(seq_len_)); for (auto &y : y_desc_->desc_array()) { std::array<int, 3> dimA{batch_size, hidden_size_ * num_directions_, 1}; std::array<int, 3> strideA{hidden_size_ * num_directions_, 1, 1}; NBLA_CUDNN_CHECK(cudnnSetTensorNdDescriptor(y, cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); } // Get an RNN algorithm using cudnnGetRNNAlgorithm or cudnnFindRNNAlgorithm. // NOTE: find algorithm executes many algorithms exhaustively, and find a best // one. // Set dropout descriptor size_t dropout_stateSize; NBLA_CUDNN_CHECK(cudnnDropoutGetStatesSize(cudnn_handle, &dropout_stateSize)); state_array_ = make_shared<CudaCachedArray>(dropout_stateSize, dtypes::BYTE, this->ctx_); void *state_ptr = state_array_->pointer<void>(); std::random_device seed_gen; std::default_random_engine engine(seed_gen()); std::uniform_int_distribution<> dist(0, 999); NBLA_CUDNN_CHECK(cudnnSetDropoutDescriptor(dropout_desc_.desc, cudnn_handle, this->dropout_, state_ptr, dropout_stateSize, dist(engine))); // Set RNN descriptor. #if CUDNN_VERSION >= 7000 NBLA_CUDNN_CHECK(cudnnSetRNNDescriptor_v5( rnn_desc_.desc, hidden_size_, this->num_layers_, dropout_desc_.desc, inputMode, direction, RNNMode, dt_)); #else NBLA_CUDNN_CHECK(cudnnSetRNNDescriptor(rnn_desc_.desc, hidden_size_, this->num_layers_, dropout_desc_.desc, inputMode, direction, RNNMode, dt_)); #endif // Get workspace size and reserve size NBLA_CUDNN_CHECK(cudnnGetRNNWorkspaceSize(cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), &workspace_size_)); if (this->training_) { NBLA_CUDNN_CHECK( cudnnGetRNNTrainingReserveSize(cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), &reserve_size_)); } // Get number of pararameters both in bytes and in elements. 
NBLA_CUDNN_CHECK(cudnnGetRNNParamsSize(cudnn_handle, rnn_desc_.desc, x_desc_->data()[0], &params_size_in_bytes_, dt_)); total_params_ = params_size_in_bytes_ / sizeof(T); // Set params descriptor { std::array<int, 3> filter_dims{(int)total_params_, 1, 1}; NBLA_CUDNN_CHECK(cudnnSetFilterNdDescriptor( params_desc_.desc, cudnn_data_type<T>::type(), CUDNN_TENSOR_NCHW, 3, filter_dims.data())); } // Calculate address corerspondences between input parameters (weights and // biases) and flattened parameters buffer. // weight : [H, I+H] // bias : [H] // Temporary buffer. This is used only for getting address offsets of matrix // and biases from the head of the params pointer. CudaCachedArray params_array(params_size_in_bytes_, dtypes::BYTE, this->ctx_); Tcu *params = params_array.pointer<Tcu>(); weight_offsets_.clear(); bias_offsets_.clear(); WCudnnFilterDesc lin_layer_mat_desc; for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_; layer_id++) { for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_; lin_layer_id++) { void *matrix_pointer; int nb_dims; cudnnDataType_t data_type; cudnnTensorFormat_t format; std::array<int, 3> dim; // Get an address pointing to a weight matrix corresponding layer_id and // linear_id, and its shape. NBLA_CUDNN_CHECK(cudnnGetRNNLinLayerMatrixParams( cudnn_handle, rnn_desc_.desc, layer_id, x_desc_->data()[0], params_desc_.desc, params, lin_layer_id, lin_layer_mat_desc.desc, &matrix_pointer)); NBLA_CUDNN_CHECK(cudnnGetFilterNdDescriptor(lin_layer_mat_desc.desc, 3, &data_type, &format, &nb_dims, dim.data())); // Size of the weight matrix can be obtained by a product of dim // elements. int weight_size = array_product(dim); weight_offsets_.push_back( {intptr_t(matrix_pointer) - intptr_t(params), weight_size}); // Get an address pointer of a bias vector corresponding to layer_id and // linear_id, and get its size. 
NBLA_CUDNN_CHECK(cudnnGetRNNLinLayerBiasParams( cudnn_handle, rnn_desc_.desc, layer_id, x_desc_->data()[0], params_desc_.desc, params, lin_layer_id, lin_layer_mat_desc.desc, &matrix_pointer)); NBLA_CUDNN_CHECK(cudnnGetFilterNdDescriptor(lin_layer_mat_desc.desc, 3, &data_type, &format, &nb_dims, dim.data())); // Size of the bias vector can be obtained by a product of dim elements. int bias_size = array_product(dim); bias_offsets_.push_back( {intptr_t(matrix_pointer) - intptr_t(params), bias_size}); } } // Set output shapes outputs[0]->reshape({seq_len_, batch_size, num_directions_ * hidden_size_}, true); outputs[1]->reshape(inputs[1]->shape(), true); outputs[2]->reshape(inputs[2]->shape(), true); } template <typename T> void LSTMCudaCudnn<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(std::stoi(this->ctx_.device_id)); if (this->training_) { // Training mode. forward_impl_training(inputs, outputs); } else { // Testing mode. forward_impl_inference(inputs, outputs); } } template <typename T> void LSTMCudaCudnn<T>::forward_impl_training(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto cudnn_handle = SingletonManager::get<CudnnHandleManager>()->handle(this->device_); // Inputs and outputs const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_); const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_); const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_); const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_); const Tcu *weight{nullptr}; const Tcu *bias{nullptr}; Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_); Tcu *h_n = outputs[1]->cast_data_and_get_pointer<Tcu>(this->ctx_); Tcu *c_n = outputs[2]->cast_data_and_get_pointer<Tcu>(this->ctx_); if (inputs.size() == 5) { if (weight_exists_) { weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } else if (bias_exists_) { bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } } if (inputs.size() > 5) { 
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_); } // Create flattened weight buffer. CudaCachedArray params_array(params_size_in_bytes_, dtypes::BYTE, this->ctx_); params_array.zero(); // Initialize params with 0 Tcu *params = params_array.pointer<Tcu>(); this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_, bias_exists_); shared_ptr<CudaCachedArray> mem_workspace{nullptr}; if (workspace_size_) { mem_workspace.reset( new CudaCachedArray(workspace_size_, dtypes::BYTE, this->ctx_)); } if (mem_reservespace_) { NBLA_CHECK(mem_reservespace_->size() == reserve_size_, error_code::value, "reserve_size_ is inconsistent with the previously set " "reservespace size."); } mem_reservespace_.reset( new CudaCachedArray(reserve_size_, dtypes::BYTE, this->ctx_)); auto alpha = get_cudnn_scalar_arg<T>(1); auto beta = get_cudnn_scalar_arg<T>(0); NBLA_CUDNN_CHECK(cudnnRNNForwardTraining( cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x, h_desc_.desc, h, c_x_desc_.desc, c, params_desc_.desc, params, y_desc_->data(), y, h_n_desc_.desc, h_n, c_y_desc_.desc, c_n, mem_workspace->pointer(), workspace_size_, mem_reservespace_->pointer(), reserve_size_)); } template <typename T> void LSTMCudaCudnn<T>::forward_impl_inference(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto cudnn_handle = SingletonManager::get<CudnnHandleManager>()->handle(this->device_); const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_); const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_); const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_); const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_); const Tcu *weight{nullptr}; const Tcu *bias{nullptr}; Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_); Tcu *h_n = outputs[1]->cast_data_and_get_pointer<Tcu>(this->ctx_); Tcu *c_n = outputs[2]->cast_data_and_get_pointer<Tcu>(this->ctx_); if 
(inputs.size() == 5) { if (weight_exists_) { weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } else if (bias_exists_) { bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } } if (inputs.size() > 5) { weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_); } // Create flattened weight buffer. CudaCachedArray params_array(params_size_in_bytes_, dtypes::BYTE, this->ctx_); params_array.zero(); // Initialize params with 0 Tcu *params = params_array.pointer<Tcu>(); this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_, bias_exists_); shared_ptr<CudaCachedArray> mem_workspace{nullptr}; if (workspace_size_) { mem_workspace.reset( new CudaCachedArray(workspace_size_, dtypes::BYTE, this->ctx_)); } NBLA_CUDNN_CHECK(cudnnRNNForwardInference( cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x, h_desc_.desc, h, c_x_desc_.desc, c, params_desc_.desc, params, y_desc_->data(), y, h_n_desc_.desc, h_n, c_y_desc_.desc, c_n, mem_workspace ? 
mem_workspace->pointer() : nullptr, workspace_size_)); } template <typename T> void LSTMCudaCudnn<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0] || propagate_down[1] || propagate_down[2] || propagate_down[3] || (inputs.size() > 4 && propagate_down[4]) || (inputs.size() > 5 && propagate_down[5]))) { return; } NBLA_CHECK(this->training_, error_code::value, "Backward is called for training only."); NBLA_CHECK(mem_reservespace_, error_code::value, "Reserve space should be allocated memory space."); NBLA_CHECK(mem_reservespace_->size() == reserve_size_, error_code::value, "reserve_size_ is inconsistent with the previously set " "reservespace size."); if (inputs.size() > 5 && propagate_down[5]) { NBLA_CHECK(propagate_down[3] == propagate_down[4], error_code::value, "If bias is backpropagated, so should weights."); } cuda_set_device(this->device_); auto cudnn_handle = SingletonManager::get<CudnnHandleManager>()->handle(this->device_); const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_); const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_); const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_); const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_); const Tcu *weight{nullptr}; const Tcu *bias{nullptr}; const Tcu *g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); const Tcu *g_h_n = outputs[1]->get_grad_pointer<Tcu>(this->ctx_); const Tcu *g_c_n = outputs[2]->get_grad_pointer<Tcu>(this->ctx_); if (inputs.size() == 5) { if (weight_exists_) { weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } else if (bias_exists_) { bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } } if (inputs.size() > 5) { weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_); } const Tcu *y = outputs[0]->get_data_pointer<Tcu>(this->ctx_); const Tcu *h_n = outputs[1]->get_data_pointer<Tcu>(this->ctx_); 
const Tcu *c_n = outputs[2]->get_data_pointer<Tcu>(this->ctx_); Tcu *g_x{nullptr}; Tcu *g_h{nullptr}; Tcu *g_c{nullptr}; Tcu *g_w_init{nullptr}; Tcu *g_weight{nullptr}; Tcu *g_bias{nullptr}; CudaCachedArray params_array(params_size_in_bytes_, dtypes::BYTE, this->ctx_); CudaCachedArray g_params_array(params_size_in_bytes_, dtypes::BYTE, this->ctx_); params_array.zero(); // Initialize params with 0 g_params_array.zero(); Tcu *params = params_array.pointer<Tcu>(); Tcu *g_params = g_params_array.pointer<Tcu>(); this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_, bias_exists_); if (propagate_down[0]) { g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]); } if (propagate_down[1]) { g_h = inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]); } if (propagate_down[2]) { g_c = inputs[2]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[2]); } if (propagate_down[3]) { g_w_init = inputs[3]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[3]); } if (inputs.size() == 5 && propagate_down[4]) { if (weight_exists_) { g_weight = inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]); } else if (bias_exists_) { g_bias = inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]); } } if (inputs.size() == 6 && propagate_down[4]) { g_weight = inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]); } if (inputs.size() == 6 && propagate_down[5]) { g_bias = inputs[5]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[5]); } shared_ptr<CudaCachedArray> mem_workspace{nullptr}; if (workspace_size_) { mem_workspace.reset( new CudaCachedArray(workspace_size_, dtypes::BYTE, this->ctx_)); } shared_ptr<CudaCachedArray> mem_x_accum{nullptr}; shared_ptr<CudaCachedArray> mem_h_accum{nullptr}; shared_ptr<CudaCachedArray> mem_c_accum{nullptr}; Tcu *dx_tmp = g_x; Tcu *dh_tmp = g_h; Tcu *dc_tmp = g_c; if (!propagate_down[0] || accum[0]) { mem_x_accum.reset(new CudaCachedArray(inputs[0]->size() * sizeof(Tcu), 
dtypes::BYTE, this->ctx_)); dx_tmp = mem_x_accum->pointer<Tcu>(); } if (!propagate_down[1] || accum[1]) { mem_h_accum.reset(new CudaCachedArray(inputs[1]->size() * sizeof(Tcu), dtypes::BYTE, this->ctx_)); dh_tmp = mem_h_accum->pointer<Tcu>(); } if (!propagate_down[2] || accum[2]) { mem_c_accum.reset(new CudaCachedArray(inputs[2]->size() * sizeof(Tcu), dtypes::BYTE, this->ctx_)); dc_tmp = mem_c_accum->pointer<Tcu>(); } NBLA_CUDNN_CHECK(cudnnRNNBackwardData( cudnn_handle, rnn_desc_.desc, seq_len_, y_desc_->data(), y, y_desc_->data(), g_y, h_n_desc_.desc, g_h_n, c_y_desc_.desc, g_c_n, params_desc_.desc, params, h_desc_.desc, h, c_x_desc_.desc, c, x_desc_->data(), dx_tmp, h_desc_.desc, dh_tmp, c_x_desc_.desc, dc_tmp, mem_workspace->pointer(), workspace_size_, mem_reservespace_->pointer(), reserve_size_)); if (propagate_down[0] && accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>), inputs[0]->size(), dx_tmp, g_x); } if (propagate_down[1] && accum[1]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>), inputs[1]->size(), dh_tmp, g_h); } if (propagate_down[2] && accum[2]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>), inputs[3]->size(), dc_tmp, g_c); } if (propagate_down[3] || (inputs.size() > 4 && propagate_down[4]) || (inputs.size() == 6 && propagate_down[5])) { NBLA_CUDNN_CHECK(cudnnRNNBackwardWeights( cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x, h_desc_.desc, h, y_desc_->data(), y, mem_workspace->pointer(), workspace_size_, params_desc_.desc, g_params, mem_reservespace_->pointer(), reserve_size_)); } bool w_init_accum = false; bool w_accum = false; bool b_accum = false; bool w_prop = false; bool b_prop = false; if (propagate_down[3] && accum[3]) { w_init_accum = true; } if (inputs.size() > 4 && propagate_down[4]) { if (inputs.size() == 5 && weight_exists_) { w_prop = true; if (accum[4]) { w_accum = true; } } else if (inputs.size() == 5 && bias_exists_) { b_prop = true; if (accum[4]) { b_accum = 
true; } } else { w_prop = true; if (accum[4]) { w_accum = true; } } } if (inputs.size() == 6 && propagate_down[5]) { b_prop = true; if (accum[5]) { b_accum = true; } } this->copy_params_to_gradients(g_params, g_w_init, g_weight, g_bias, w_init_accum, w_accum, b_accum, propagate_down[3], w_prop, b_prop); } }
3b6366f82926f6eee002db199ae2d07715968809.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/cudnn/cudnn.hpp> #include <nbla/cuda/cudnn/function/lstm.hpp> #include <nbla/variable.hpp> #include <array> #include <random> namespace nbla { template <typename ARRAY> typename ARRAY::value_type array_product(const ARRAY &arr) { typename ARRAY::value_type p = 1; for (auto a : arr) { p *= a; } return p; } template <typename T> __global__ void kernel_forward_copy_weights(size_t size, const T *weight, T *param, int j_stride, int input_dim) { NBLA_CUDA_KERNEL_LOOP(i, size) { int stride; stride = (i / input_dim) * j_stride; param[i] = weight[i + stride]; } } template <typename T> __global__ void kernel_forward_copy_bias(size_t size, const T *bias, T *param) { NBLA_CUDA_KERNEL_LOOP(i, size) { param[i] = bias[i]; } } template <typename T> __global__ void kernel_accumulate_x_and_h(size_t size, const T *d_ptr, T *d) { NBLA_CUDA_KERNEL_LOOP(i, size) { d[i] += d_ptr[i]; } } template <typename T> __global__ void kernel_backward_copy_weights(size_t size, T *g_weight, T *g_param, int j_stride, int input_dim, bool accum) { NBLA_CUDA_KERNEL_LOOP(i, size) { int stride; stride = (i / input_dim) * j_stride; if (accum) { g_weight[i + stride] += g_param[i]; } else { g_weight[i + stride] = g_param[i]; } } } template <typename T> __global__ void 
kernel_backward_copy_bias(size_t size, T *g_bias, T *g_param, bool accum) { NBLA_CUDA_KERNEL_LOOP(i, size) { if (accum) { g_bias[i] += g_param[i]; } else { g_bias[i] = g_param[i]; } } } template <typename T> void LSTMCudaCudnn<T>::copy_weight_bias_to_params( Tcu *params, const Tcu *w_init, const Tcu *weight, const Tcu *bias, bool weight_exists, bool bias_exists) { for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_; layer_id++) { for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_; lin_layer_id++) { int param_index = layer_id * num_lin_layers_ + lin_layer_id; int inweight_offset = 0; if (layer_id / num_directions_ == 0) { if (lin_layer_id < 4) { inweight_offset = layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ + lin_layer_id * hidden_size_ * (input_dim_ + hidden_size_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_forward_copy_weights<Tcu>), weight_offsets_[param_index].second, w_init + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), hidden_size_, input_dim_); } else { inweight_offset = layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ + (lin_layer_id - 4) * hidden_size_ * (input_dim_ + hidden_size_) + input_dim_; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_forward_copy_weights<Tcu>), weight_offsets_[param_index].second, w_init + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), input_dim_, hidden_size_) } } else { if (lin_layer_id < 4) { inweight_offset = (layer_id - num_directions_) * (num_directions_ * hidden_size_ + hidden_size_) * 4 * hidden_size_ + lin_layer_id * hidden_size_ * (num_directions_ * hidden_size_ + hidden_size_); if (this->num_layers_ > 1 && weight_exists) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_forward_copy_weights<Tcu>), weight_offsets_[param_index].second, weight + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), hidden_size_, num_directions_ * hidden_size_); } } else { inweight_offset = (layer_id - num_directions_) * 
(num_directions_ * hidden_size_ + hidden_size_) * 4 * hidden_size_ + (lin_layer_id - 4) * hidden_size_ * (num_directions_ * hidden_size_ + hidden_size_) + num_directions_ * hidden_size_; if (this->num_layers_ > 1 && weight_exists) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_forward_copy_weights<Tcu>), weight_offsets_[param_index].second, weight + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), num_directions_ * hidden_size_, hidden_size_); } } } if (bias_exists && bias && lin_layer_id < 4) { // copy only when lin_layer_id = 0, 1, 2, 3 NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_forward_copy_bias<Tcu>), bias_offsets_[param_index].second, bias + 4 * layer_id * hidden_size_ + lin_layer_id * hidden_size_, params + bias_offsets_[param_index].first / sizeof(T)); } } } } template <typename T> void LSTMCudaCudnn<T>::copy_params_to_gradients( Tcu *params, Tcu *w_init, Tcu *weight, Tcu *bias, bool w_init_accum, bool w_accum, bool b_accum, bool w_init_propagate, bool w_propagate, bool b_propagate) { for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_; layer_id++) { for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_; lin_layer_id++) { int param_index = layer_id * num_lin_layers_ + lin_layer_id; int inweight_offset = 0; if (layer_id / num_directions_ == 0) { if (lin_layer_id < 4) { inweight_offset = layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ + lin_layer_id * hidden_size_ * (input_dim_ + hidden_size_); if (w_init_propagate) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_backward_copy_weights<Tcu>), weight_offsets_[param_index].second, w_init + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), hidden_size_, input_dim_, w_init_accum); } } else { inweight_offset = layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ + (lin_layer_id - 4) * hidden_size_ * (input_dim_ + hidden_size_) + input_dim_; if (w_init_propagate) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_backward_copy_weights<Tcu>), 
weight_offsets_[param_index].second, w_init + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), input_dim_, hidden_size_, w_init_accum); } } } else { if (lin_layer_id < 4) { inweight_offset = (layer_id - num_directions_) * (num_directions_ * hidden_size_ + hidden_size_) * 4 * hidden_size_ + lin_layer_id * hidden_size_ * (num_directions_ * hidden_size_ + hidden_size_); if (w_propagate) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_backward_copy_weights<Tcu>), weight_offsets_[param_index].second, weight + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), hidden_size_, num_directions_ * hidden_size_, w_accum); } } else { inweight_offset = (layer_id - num_directions_) * (num_directions_ * hidden_size_ + hidden_size_) * 4 * hidden_size_ + (lin_layer_id - 4) * hidden_size_ * (num_directions_ * hidden_size_ + hidden_size_) + num_directions_ * hidden_size_; if (w_propagate) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_backward_copy_weights<Tcu>), weight_offsets_[param_index].second, weight + inweight_offset, params + weight_offsets_[param_index].first / sizeof(T), num_directions_ * hidden_size_, hidden_size_, w_accum); } } } if (bias && b_propagate && lin_layer_id < 4) { // copy only when lin_layer_id = 0, 1, 2, 3 NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_backward_copy_bias<Tcu>), bias_offsets_[param_index].second, bias + 4 * layer_id * hidden_size_ + lin_layer_id * hidden_size_, params + bias_offsets_[param_index].first / sizeof(T), b_accum); } } } } template <typename T> void LSTMCudaCudnn<T>::setup_impl(const Variables &inputs, const Variables &outputs) { // Create x descriptors and y descriptors by resizing // Set descriptors cuda_set_device(this->device_); auto cudnn_handle = SingletonManager::get<CudnnHandleManager>()->handle(this->device_); Shape_t inshape = inputs[0]->shape(); Shape_t hshape = inputs[1]->shape(); Shape_t cshape = inputs[2]->shape(); Shape_t outshape = outputs[0]->shape(); // Check input dimensions 
NBLA_CHECK(inputs[0]->ndim() == 3, error_code::value, "Input x must be a 3 dimensional array with a shape of (steps, " "batch_size, input_size)."); // Get input dimensions cudnnDataType_t dt_ = cudnn_data_type<T>::type(); seq_len_ = inshape[0]; int batch_size = inshape[1]; input_dim_ = inshape[2]; // Assuming this function takes h as (numLayer, numD, B, M) hidden_size_ = inputs[1]->shape()[3]; inputMode = CUDNN_LINEAR_INPUT; num_directions_ = this->bidirectional_ ? 2 : 1; direction = this->bidirectional_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL; RNNMode = CUDNN_LSTM; num_lin_layers_ = 8; // Check shape of h & c const char *error_msg_h = "Input h must be a 4 dimensional array with a " "shape of (num_layers, num_directions, batch_size, " "hidden_size)."; NBLA_CHECK(inputs[1]->ndim() == 4, error_code::value, error_msg_h); NBLA_CHECK(hshape[0] == this->num_layers_, error_code::value, error_msg_h); NBLA_CHECK(hshape[1] == num_directions_, error_code::value, error_msg_h); NBLA_CHECK(hshape[2] == batch_size, error_code::value, error_msg_h); NBLA_CHECK(hshape == cshape, error_code::value, "Input c must has the same shape as input h."); // Check weight shape at 0th layer Shape_t w0_shape = inputs[3]->shape(); const char *error_msg_w0 = "Input w0 must be a 4 dimensional array with a " "shape of (num_directions, 4, hidden_size, " "input_size + hidden_size)."; NBLA_CHECK(inputs[2]->ndim() == 4, error_code::value, error_msg_w0); NBLA_CHECK(w0_shape[0] == num_directions_, error_code::value, error_msg_w0); NBLA_CHECK(w0_shape[1] == 4, error_code::value, error_msg_w0); NBLA_CHECK(w0_shape[2] == hidden_size_, error_code::value, error_msg_w0); NBLA_CHECK(w0_shape[3] == hidden_size_ + input_dim_, error_code::value, error_msg_w0); weight_exists_ = true; bias_exists_ = true; if (inputs.size() == 4) { weight_exists_ = false; bias_exists_ = false; } else if (inputs.size() == 5) { Shape_t opt_shape = inputs[4]->shape(); if (this->num_layers_ > 1 && opt_shape.size() == 5) { 
bias_exists_ = false; } else if (this->num_layers_ > 1 && opt_shape.size() != 5) { NBLA_ERROR(error_code::value, "Weight argument must be passed when num_layers > 1"); } else if (this->num_layers_ == 1 && opt_shape.size() != 4) { NBLA_ERROR(error_code::value, "Weight argument cannot be passed when num_layers == 1"); } else if (this->num_layers_ == 1 && opt_shape.size() == 4) { weight_exists_ = false; } } else if ((inputs.size() > 5) && (this->num_layers_ == 1)) { NBLA_ERROR(error_code::value, "Weight argument cannot be passed when num_layers == 1"); } // Check weight shape if (weight_exists_) { Shape_t w_shape = inputs[4]->shape(); const char *error_msg_w = "Input w must be a 5 dimensional array with a " "shape of (num_layers - 1, num_directions, 4, " "hidden_size, num_directions * hidden_size + " "hidden_size)."; NBLA_CHECK(inputs[4]->ndim() == 5, error_code::value, error_msg_w); NBLA_CHECK(w_shape[0] == this->num_layers_ - 1, error_code::value, error_msg_w); NBLA_CHECK(w_shape[1] == num_directions_, error_code::value, error_msg_w); NBLA_CHECK(w_shape[2] == 4, error_code::value, error_msg_w); NBLA_CHECK(w_shape[3] == hidden_size_, error_code::value, error_msg_w); NBLA_CHECK(w_shape[4] == num_directions_ * hidden_size_ + hidden_size_, error_code::value, error_msg_w); } // Check bias shape if (bias_exists_) { const int b_index = weight_exists_ ? 
5 : 4; Shape_t b_shape = inputs[b_index]->shape(); const char *error_msg_b = "Input b must be a 4 dimensional array with a " "shape of (num_layers, 4, num_directions, " "hidden_size)."; NBLA_CHECK(inputs[b_index]->ndim() == 4, error_code::value, error_msg_b); NBLA_CHECK(b_shape[0] == this->num_layers_, error_code::value, error_msg_b); NBLA_CHECK(b_shape[1] == num_directions_, error_code::value, error_msg_b); NBLA_CHECK(b_shape[2] == 4, error_code::value, error_msg_b); NBLA_CHECK(b_shape[3] == hidden_size_, error_code::value, error_msg_b); } // Set X desc // xdesc : T * (B, N, 1) // x : (T, B, N) row-major x_desc_.reset(new WCudnnTensorDescArray(seq_len_)); for (auto &x : x_desc_->desc_array()) { std::array<int, 3> dimA{batch_size, input_dim_, 1}; std::array<int, 3> strideA{input_dim_, 1, 1}; NBLA_CUDNN_CHECK(cudnnSetTensorNdDescriptor(x, cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); } // Set hx and hy desc // hxDesc : (numLayer * numD, B, M) // hx : (numLayer, numD, B, M) row-major >>> or (numD, numLayer, B, M) // row-major // hyDesc : (numLayer * numD, B, M) // hy : (numLayer, numD, B, M) row-major >>> or (numD, numLayer, B, M) // row-major { std::array<int, 3> dimA{this->num_layers_ * num_directions_, batch_size, hidden_size_}; std::array<int, 3> strideA{batch_size * hidden_size_, hidden_size_, 1}; NBLA_CUDNN_CHECK( cudnnSetTensorNdDescriptor(h_desc_.desc, cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); NBLA_CUDNN_CHECK( cudnnSetTensorNdDescriptor(h_n_desc_.desc, cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); } // cx and cy { std::array<int, 3> dimA{this->num_layers_ * num_directions_, batch_size, hidden_size_}; std::array<int, 3> strideA{batch_size * hidden_size_, hidden_size_, 1}; NBLA_CUDNN_CHECK( cudnnSetTensorNdDescriptor(c_x_desc_.desc, cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); NBLA_CUDNN_CHECK( cudnnSetTensorNdDescriptor(c_y_desc_.desc, 
cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); } // Set Y desc // yDesc : T * (B, M * numD, 1) // y : (T, B, M, numD) row-major, >>> or (T, B, numD, M) y_desc_.reset(new WCudnnTensorDescArray(seq_len_)); for (auto &y : y_desc_->desc_array()) { std::array<int, 3> dimA{batch_size, hidden_size_ * num_directions_, 1}; std::array<int, 3> strideA{hidden_size_ * num_directions_, 1, 1}; NBLA_CUDNN_CHECK(cudnnSetTensorNdDescriptor(y, cudnn_data_type<T>::type(), dimA.size(), dimA.data(), strideA.data())); } // Get an RNN algorithm using cudnnGetRNNAlgorithm or cudnnFindRNNAlgorithm. // NOTE: find algorithm executes many algorithms exhaustively, and find a best // one. // Set dropout descriptor size_t dropout_stateSize; NBLA_CUDNN_CHECK(cudnnDropoutGetStatesSize(cudnn_handle, &dropout_stateSize)); state_array_ = make_shared<CudaCachedArray>(dropout_stateSize, dtypes::BYTE, this->ctx_); void *state_ptr = state_array_->pointer<void>(); std::random_device seed_gen; std::default_random_engine engine(seed_gen()); std::uniform_int_distribution<> dist(0, 999); NBLA_CUDNN_CHECK(cudnnSetDropoutDescriptor(dropout_desc_.desc, cudnn_handle, this->dropout_, state_ptr, dropout_stateSize, dist(engine))); // Set RNN descriptor. #if CUDNN_VERSION >= 7000 NBLA_CUDNN_CHECK(cudnnSetRNNDescriptor_v5( rnn_desc_.desc, hidden_size_, this->num_layers_, dropout_desc_.desc, inputMode, direction, RNNMode, dt_)); #else NBLA_CUDNN_CHECK(cudnnSetRNNDescriptor(rnn_desc_.desc, hidden_size_, this->num_layers_, dropout_desc_.desc, inputMode, direction, RNNMode, dt_)); #endif // Get workspace size and reserve size NBLA_CUDNN_CHECK(cudnnGetRNNWorkspaceSize(cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), &workspace_size_)); if (this->training_) { NBLA_CUDNN_CHECK( cudnnGetRNNTrainingReserveSize(cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), &reserve_size_)); } // Get number of pararameters both in bytes and in elements. 
NBLA_CUDNN_CHECK(cudnnGetRNNParamsSize(cudnn_handle, rnn_desc_.desc, x_desc_->data()[0], &params_size_in_bytes_, dt_)); total_params_ = params_size_in_bytes_ / sizeof(T); // Set params descriptor { std::array<int, 3> filter_dims{(int)total_params_, 1, 1}; NBLA_CUDNN_CHECK(cudnnSetFilterNdDescriptor( params_desc_.desc, cudnn_data_type<T>::type(), CUDNN_TENSOR_NCHW, 3, filter_dims.data())); } // Calculate address corerspondences between input parameters (weights and // biases) and flattened parameters buffer. // weight : [H, I+H] // bias : [H] // Temporary buffer. This is used only for getting address offsets of matrix // and biases from the head of the params pointer. CudaCachedArray params_array(params_size_in_bytes_, dtypes::BYTE, this->ctx_); Tcu *params = params_array.pointer<Tcu>(); weight_offsets_.clear(); bias_offsets_.clear(); WCudnnFilterDesc lin_layer_mat_desc; for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_; layer_id++) { for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_; lin_layer_id++) { void *matrix_pointer; int nb_dims; cudnnDataType_t data_type; cudnnTensorFormat_t format; std::array<int, 3> dim; // Get an address pointing to a weight matrix corresponding layer_id and // linear_id, and its shape. NBLA_CUDNN_CHECK(cudnnGetRNNLinLayerMatrixParams( cudnn_handle, rnn_desc_.desc, layer_id, x_desc_->data()[0], params_desc_.desc, params, lin_layer_id, lin_layer_mat_desc.desc, &matrix_pointer)); NBLA_CUDNN_CHECK(cudnnGetFilterNdDescriptor(lin_layer_mat_desc.desc, 3, &data_type, &format, &nb_dims, dim.data())); // Size of the weight matrix can be obtained by a product of dim // elements. int weight_size = array_product(dim); weight_offsets_.push_back( {intptr_t(matrix_pointer) - intptr_t(params), weight_size}); // Get an address pointer of a bias vector corresponding to layer_id and // linear_id, and get its size. 
NBLA_CUDNN_CHECK(cudnnGetRNNLinLayerBiasParams( cudnn_handle, rnn_desc_.desc, layer_id, x_desc_->data()[0], params_desc_.desc, params, lin_layer_id, lin_layer_mat_desc.desc, &matrix_pointer)); NBLA_CUDNN_CHECK(cudnnGetFilterNdDescriptor(lin_layer_mat_desc.desc, 3, &data_type, &format, &nb_dims, dim.data())); // Size of the bias vector can be obtained by a product of dim elements. int bias_size = array_product(dim); bias_offsets_.push_back( {intptr_t(matrix_pointer) - intptr_t(params), bias_size}); } } // Set output shapes outputs[0]->reshape({seq_len_, batch_size, num_directions_ * hidden_size_}, true); outputs[1]->reshape(inputs[1]->shape(), true); outputs[2]->reshape(inputs[2]->shape(), true); } template <typename T> void LSTMCudaCudnn<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(std::stoi(this->ctx_.device_id)); if (this->training_) { // Training mode. forward_impl_training(inputs, outputs); } else { // Testing mode. forward_impl_inference(inputs, outputs); } } template <typename T> void LSTMCudaCudnn<T>::forward_impl_training(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto cudnn_handle = SingletonManager::get<CudnnHandleManager>()->handle(this->device_); // Inputs and outputs const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_); const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_); const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_); const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_); const Tcu *weight{nullptr}; const Tcu *bias{nullptr}; Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_); Tcu *h_n = outputs[1]->cast_data_and_get_pointer<Tcu>(this->ctx_); Tcu *c_n = outputs[2]->cast_data_and_get_pointer<Tcu>(this->ctx_); if (inputs.size() == 5) { if (weight_exists_) { weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } else if (bias_exists_) { bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } } if (inputs.size() > 5) { 
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_); } // Create flattened weight buffer. CudaCachedArray params_array(params_size_in_bytes_, dtypes::BYTE, this->ctx_); params_array.zero(); // Initialize params with 0 Tcu *params = params_array.pointer<Tcu>(); this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_, bias_exists_); shared_ptr<CudaCachedArray> mem_workspace{nullptr}; if (workspace_size_) { mem_workspace.reset( new CudaCachedArray(workspace_size_, dtypes::BYTE, this->ctx_)); } if (mem_reservespace_) { NBLA_CHECK(mem_reservespace_->size() == reserve_size_, error_code::value, "reserve_size_ is inconsistent with the previously set " "reservespace size."); } mem_reservespace_.reset( new CudaCachedArray(reserve_size_, dtypes::BYTE, this->ctx_)); auto alpha = get_cudnn_scalar_arg<T>(1); auto beta = get_cudnn_scalar_arg<T>(0); NBLA_CUDNN_CHECK(cudnnRNNForwardTraining( cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x, h_desc_.desc, h, c_x_desc_.desc, c, params_desc_.desc, params, y_desc_->data(), y, h_n_desc_.desc, h_n, c_y_desc_.desc, c_n, mem_workspace->pointer(), workspace_size_, mem_reservespace_->pointer(), reserve_size_)); } template <typename T> void LSTMCudaCudnn<T>::forward_impl_inference(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto cudnn_handle = SingletonManager::get<CudnnHandleManager>()->handle(this->device_); const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_); const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_); const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_); const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_); const Tcu *weight{nullptr}; const Tcu *bias{nullptr}; Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_); Tcu *h_n = outputs[1]->cast_data_and_get_pointer<Tcu>(this->ctx_); Tcu *c_n = outputs[2]->cast_data_and_get_pointer<Tcu>(this->ctx_); if 
(inputs.size() == 5) { if (weight_exists_) { weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } else if (bias_exists_) { bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } } if (inputs.size() > 5) { weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_); } // Create flattened weight buffer. CudaCachedArray params_array(params_size_in_bytes_, dtypes::BYTE, this->ctx_); params_array.zero(); // Initialize params with 0 Tcu *params = params_array.pointer<Tcu>(); this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_, bias_exists_); shared_ptr<CudaCachedArray> mem_workspace{nullptr}; if (workspace_size_) { mem_workspace.reset( new CudaCachedArray(workspace_size_, dtypes::BYTE, this->ctx_)); } NBLA_CUDNN_CHECK(cudnnRNNForwardInference( cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x, h_desc_.desc, h, c_x_desc_.desc, c, params_desc_.desc, params, y_desc_->data(), y, h_n_desc_.desc, h_n, c_y_desc_.desc, c_n, mem_workspace ? 
mem_workspace->pointer() : nullptr, workspace_size_)); } template <typename T> void LSTMCudaCudnn<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0] || propagate_down[1] || propagate_down[2] || propagate_down[3] || (inputs.size() > 4 && propagate_down[4]) || (inputs.size() > 5 && propagate_down[5]))) { return; } NBLA_CHECK(this->training_, error_code::value, "Backward is called for training only."); NBLA_CHECK(mem_reservespace_, error_code::value, "Reserve space should be allocated memory space."); NBLA_CHECK(mem_reservespace_->size() == reserve_size_, error_code::value, "reserve_size_ is inconsistent with the previously set " "reservespace size."); if (inputs.size() > 5 && propagate_down[5]) { NBLA_CHECK(propagate_down[3] == propagate_down[4], error_code::value, "If bias is backpropagated, so should weights."); } cuda_set_device(this->device_); auto cudnn_handle = SingletonManager::get<CudnnHandleManager>()->handle(this->device_); const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_); const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_); const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_); const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_); const Tcu *weight{nullptr}; const Tcu *bias{nullptr}; const Tcu *g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); const Tcu *g_h_n = outputs[1]->get_grad_pointer<Tcu>(this->ctx_); const Tcu *g_c_n = outputs[2]->get_grad_pointer<Tcu>(this->ctx_); if (inputs.size() == 5) { if (weight_exists_) { weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } else if (bias_exists_) { bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_); } } if (inputs.size() > 5) { weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_); bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_); } const Tcu *y = outputs[0]->get_data_pointer<Tcu>(this->ctx_); const Tcu *h_n = outputs[1]->get_data_pointer<Tcu>(this->ctx_); 
const Tcu *c_n = outputs[2]->get_data_pointer<Tcu>(this->ctx_); Tcu *g_x{nullptr}; Tcu *g_h{nullptr}; Tcu *g_c{nullptr}; Tcu *g_w_init{nullptr}; Tcu *g_weight{nullptr}; Tcu *g_bias{nullptr}; CudaCachedArray params_array(params_size_in_bytes_, dtypes::BYTE, this->ctx_); CudaCachedArray g_params_array(params_size_in_bytes_, dtypes::BYTE, this->ctx_); params_array.zero(); // Initialize params with 0 g_params_array.zero(); Tcu *params = params_array.pointer<Tcu>(); Tcu *g_params = g_params_array.pointer<Tcu>(); this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_, bias_exists_); if (propagate_down[0]) { g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]); } if (propagate_down[1]) { g_h = inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]); } if (propagate_down[2]) { g_c = inputs[2]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[2]); } if (propagate_down[3]) { g_w_init = inputs[3]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[3]); } if (inputs.size() == 5 && propagate_down[4]) { if (weight_exists_) { g_weight = inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]); } else if (bias_exists_) { g_bias = inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]); } } if (inputs.size() == 6 && propagate_down[4]) { g_weight = inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]); } if (inputs.size() == 6 && propagate_down[5]) { g_bias = inputs[5]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[5]); } shared_ptr<CudaCachedArray> mem_workspace{nullptr}; if (workspace_size_) { mem_workspace.reset( new CudaCachedArray(workspace_size_, dtypes::BYTE, this->ctx_)); } shared_ptr<CudaCachedArray> mem_x_accum{nullptr}; shared_ptr<CudaCachedArray> mem_h_accum{nullptr}; shared_ptr<CudaCachedArray> mem_c_accum{nullptr}; Tcu *dx_tmp = g_x; Tcu *dh_tmp = g_h; Tcu *dc_tmp = g_c; if (!propagate_down[0] || accum[0]) { mem_x_accum.reset(new CudaCachedArray(inputs[0]->size() * sizeof(Tcu), 
dtypes::BYTE, this->ctx_)); dx_tmp = mem_x_accum->pointer<Tcu>(); } if (!propagate_down[1] || accum[1]) { mem_h_accum.reset(new CudaCachedArray(inputs[1]->size() * sizeof(Tcu), dtypes::BYTE, this->ctx_)); dh_tmp = mem_h_accum->pointer<Tcu>(); } if (!propagate_down[2] || accum[2]) { mem_c_accum.reset(new CudaCachedArray(inputs[2]->size() * sizeof(Tcu), dtypes::BYTE, this->ctx_)); dc_tmp = mem_c_accum->pointer<Tcu>(); } NBLA_CUDNN_CHECK(cudnnRNNBackwardData( cudnn_handle, rnn_desc_.desc, seq_len_, y_desc_->data(), y, y_desc_->data(), g_y, h_n_desc_.desc, g_h_n, c_y_desc_.desc, g_c_n, params_desc_.desc, params, h_desc_.desc, h, c_x_desc_.desc, c, x_desc_->data(), dx_tmp, h_desc_.desc, dh_tmp, c_x_desc_.desc, dc_tmp, mem_workspace->pointer(), workspace_size_, mem_reservespace_->pointer(), reserve_size_)); if (propagate_down[0] && accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>), inputs[0]->size(), dx_tmp, g_x); } if (propagate_down[1] && accum[1]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>), inputs[1]->size(), dh_tmp, g_h); } if (propagate_down[2] && accum[2]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>), inputs[3]->size(), dc_tmp, g_c); } if (propagate_down[3] || (inputs.size() > 4 && propagate_down[4]) || (inputs.size() == 6 && propagate_down[5])) { NBLA_CUDNN_CHECK(cudnnRNNBackwardWeights( cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x, h_desc_.desc, h, y_desc_->data(), y, mem_workspace->pointer(), workspace_size_, params_desc_.desc, g_params, mem_reservespace_->pointer(), reserve_size_)); } bool w_init_accum = false; bool w_accum = false; bool b_accum = false; bool w_prop = false; bool b_prop = false; if (propagate_down[3] && accum[3]) { w_init_accum = true; } if (inputs.size() > 4 && propagate_down[4]) { if (inputs.size() == 5 && weight_exists_) { w_prop = true; if (accum[4]) { w_accum = true; } } else if (inputs.size() == 5 && bias_exists_) { b_prop = true; if (accum[4]) { b_accum = 
true; } } else { w_prop = true; if (accum[4]) { w_accum = true; } } } if (inputs.size() == 6 && propagate_down[5]) { b_prop = true; if (accum[5]) { b_accum = true; } } this->copy_params_to_gradients(g_params, g_w_init, g_weight, g_bias, w_init_accum, w_accum, b_accum, propagate_down[3], w_prop, b_prop); } }
bbe066164c24c3e841ffb7892a2450b80a947b2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" template <typename T> __global__ void kernelgpuFlux(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uinf, T *param, T time, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i<ng) { T param1 = param[0]; T param2 = param[1]; T param3 = param[2]; T param4 = param[3]; T param5 = param[4]; T param6 = param[5]; T param7 = param[6]; T param8 = param[7]; T param9 = param[8]; T param10 = param[9]; T param11 = param[10]; T param12 = param[11]; T param13 = param[12]; T param14 = param[13]; T param15 = param[14]; T param16 = param[15]; T param17 = param[16]; T uinf1 = uinf[0]; T uinf2 = uinf[1]; T xdg1 = xdg[0*ng+i]; T xdg2 = xdg[1*ng+i]; T udg1 = udg[0*ng+i]; T udg2 = udg[1*ng+i]; T udg3 = udg[2*ng+i]; T udg4 = udg[3*ng+i]; T udg5 = udg[4*ng+i]; T udg6 = udg[5*ng+i]; T udg7 = udg[6*ng+i]; T udg8 = udg[7*ng+i]; T udg9 = udg[8*ng+i]; T udg10 = udg[9*ng+i]; T udg11 = udg[10*ng+i]; T udg12 = udg[11*ng+i]; T odg1 = odg[0*ng+i]; T odg2 = odg[1*ng+i]; T t2 = 1.0/3.141592653589793; T t3 = udg1*1.0E3; T t4 = t3-1.0E1; T t5 = atan(t4); T t6 = t2*t5; T t7 = t6+1.0/2.0; T t8 = udg1-1.0/1.0E2; T t9 = t7*t8*1.0E3; T t10 = t9+3.183097800805168E-1; T t11 = t7*t8; T t12 = t11+3.183097800805168E-4; T t13 = t11+1.031830978008052E-2; T t14 = 1.0/t13; T t15 = atan(t10); T t16 = t2*t15; T t17 = t12*t12; T t18 = t17*1.0E6; T t19 = t18+1.0; T t20 = 1.0/t19; T t21 = t2*t10*t20; T t22 = t16+t21+1.0/2.0; T t23 = 1.0/(t13*t13); T t24 = udg2*udg2; T t25 = t23*t24*(1.0/2.0); T t26 = udg3*udg3; T t27 = t23*t26*(1.0/2.0); T t28 = t25+t27; T t34 = t13*t28; T t29 = -t34+udg4; T t30 = param1-1.0; T t52 = t14*t22*udg2*udg5; T t31 = -t52+udg6; T t54 = t14*t22*udg3*udg9; T t32 = -t54+udg11; T t33 = t14*t32; T t35 = t29*t30*1.0E3; T t36 = t35-1.0; T t37 = atan(t36); T t38 = t2*t37; T t39 = t38+1.0/2.0; T t40 = t29*t30; T t41 = t40-1.0/1.0E3; T t42 = t39*t41; T t43 = 
param4*param4; T t44 = t42+1.318309780080517E-3; T t45 = 1.0/param2; T t46 = param1*param10*t14*t43*t44; T t47 = t46+5.52E2/5.0; T t48 = 1.0/t47; T t49 = param10+5.52E2/5.0; T t50 = param1*t14*t43*t44; T t51 = pow(t50,3.0/2.0); T t53 = t14*t31; T t55 = t33+t53; T t56 = odg1*t55; T t57 = odg1*param14*(2.0/3.0); T t58 = t45*t48*t49*t51*(2.0/3.0); T t59 = t57+t58; T t60 = t33-t14*t31*2.0; T t72 = t14*t22*udg3*udg5; T t61 = -t72+udg7; T t62 = t14*t61; T t73 = t14*t22*udg2*udg9; T t63 = -t73+udg10; T t64 = t14*t63; T t65 = t62+t64; T t66 = odg1*param14; T t67 = t45*t48*t49*t51; T t68 = t66+t67; T t69 = t42+3.183097800805168E-4; T t70 = t39*t41*1.0E3; T t71 = t70+3.183097800805168E-1; T t74 = t65*t68; T t75 = t14*udg2*udg3; T t76 = t74+t75; T t77 = t14*udg4; T t78 = t14*t44; T t79 = t77+t78; T t80 = t53-t14*t32*2.0; T t81 = odg1*param13; T t82 = 1.0/param3; T t83 = param1*t68*t82; T t84 = t81+t83; T t85 = atan(t71); T t86 = t2*t85; T t87 = t69*t69; T t88 = t87*1.0E6; T t89 = t88+1.0; T t90 = 1.0/t89; T t91 = t2*t71*t90; T t92 = t86+t91+1.0/2.0; T t93 = 1.0/t30; f[0*ng+i] = udg2; f[1*ng+i] = t42+t56+t14*t24-t59*t60+1.318309780080517E-3; f[2*ng+i] = t76; f[3*ng+i] = t79*udg2+t14*udg2*(t56-t59*t60)-t23*t84*t93*(t22*t44*udg5+t13*t30*t92*(-udg8+t13*(t23*t31*udg2+t23*t61*udg3)+t22*t28*udg5))+t14*t65*t68*udg3; f[4*ng+i] = udg3; f[5*ng+i] = t76; f[6*ng+i] = t42+t56+t14*t26-t59*t80+1.318309780080517E-3; f[7*ng+i] = t79*udg3+t14*udg3*(t56-t59*t80)-t23*t84*t93*(t22*t44*udg9+t13*t30*t92*(-udg12+t13*(t23*t32*udg3+t23*t63*udg2)+t22*t28*udg9))+t14*t65*t68*udg2; i += blockDim.x * gridDim.x; } } template <typename T> void gpuFlux(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uinf, T *param, T time, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw) { int blockDim = 256; int gridDim = (ng + blockDim - 1) / blockDim; gridDim = (gridDim>1024)? 
1024 : gridDim; hipLaunchKernelGGL(( kernelgpuFlux), dim3(gridDim), dim3(blockDim), 0, 0, f, xdg, udg, odg, wdg, uinf, param, time, ng, nc, ncu, nd, ncx, nco, ncw); } template void gpuFlux(double *, double *, double *, double *, double *, double *, double *, double, int, int, int, int, int, int, int); template void gpuFlux(float *, float *, float *, float *, float *, float *, float *, float, int, int, int, int, int, int, int);
bbe066164c24c3e841ffb7892a2450b80a947b2d.cu
template <typename T> __global__ void kernelgpuFlux(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uinf, T *param, T time, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i<ng) { T param1 = param[0]; T param2 = param[1]; T param3 = param[2]; T param4 = param[3]; T param5 = param[4]; T param6 = param[5]; T param7 = param[6]; T param8 = param[7]; T param9 = param[8]; T param10 = param[9]; T param11 = param[10]; T param12 = param[11]; T param13 = param[12]; T param14 = param[13]; T param15 = param[14]; T param16 = param[15]; T param17 = param[16]; T uinf1 = uinf[0]; T uinf2 = uinf[1]; T xdg1 = xdg[0*ng+i]; T xdg2 = xdg[1*ng+i]; T udg1 = udg[0*ng+i]; T udg2 = udg[1*ng+i]; T udg3 = udg[2*ng+i]; T udg4 = udg[3*ng+i]; T udg5 = udg[4*ng+i]; T udg6 = udg[5*ng+i]; T udg7 = udg[6*ng+i]; T udg8 = udg[7*ng+i]; T udg9 = udg[8*ng+i]; T udg10 = udg[9*ng+i]; T udg11 = udg[10*ng+i]; T udg12 = udg[11*ng+i]; T odg1 = odg[0*ng+i]; T odg2 = odg[1*ng+i]; T t2 = 1.0/3.141592653589793; T t3 = udg1*1.0E3; T t4 = t3-1.0E1; T t5 = atan(t4); T t6 = t2*t5; T t7 = t6+1.0/2.0; T t8 = udg1-1.0/1.0E2; T t9 = t7*t8*1.0E3; T t10 = t9+3.183097800805168E-1; T t11 = t7*t8; T t12 = t11+3.183097800805168E-4; T t13 = t11+1.031830978008052E-2; T t14 = 1.0/t13; T t15 = atan(t10); T t16 = t2*t15; T t17 = t12*t12; T t18 = t17*1.0E6; T t19 = t18+1.0; T t20 = 1.0/t19; T t21 = t2*t10*t20; T t22 = t16+t21+1.0/2.0; T t23 = 1.0/(t13*t13); T t24 = udg2*udg2; T t25 = t23*t24*(1.0/2.0); T t26 = udg3*udg3; T t27 = t23*t26*(1.0/2.0); T t28 = t25+t27; T t34 = t13*t28; T t29 = -t34+udg4; T t30 = param1-1.0; T t52 = t14*t22*udg2*udg5; T t31 = -t52+udg6; T t54 = t14*t22*udg3*udg9; T t32 = -t54+udg11; T t33 = t14*t32; T t35 = t29*t30*1.0E3; T t36 = t35-1.0; T t37 = atan(t36); T t38 = t2*t37; T t39 = t38+1.0/2.0; T t40 = t29*t30; T t41 = t40-1.0/1.0E3; T t42 = t39*t41; T t43 = param4*param4; T t44 = t42+1.318309780080517E-3; T t45 = 1.0/param2; T t46 = 
param1*param10*t14*t43*t44; T t47 = t46+5.52E2/5.0; T t48 = 1.0/t47; T t49 = param10+5.52E2/5.0; T t50 = param1*t14*t43*t44; T t51 = pow(t50,3.0/2.0); T t53 = t14*t31; T t55 = t33+t53; T t56 = odg1*t55; T t57 = odg1*param14*(2.0/3.0); T t58 = t45*t48*t49*t51*(2.0/3.0); T t59 = t57+t58; T t60 = t33-t14*t31*2.0; T t72 = t14*t22*udg3*udg5; T t61 = -t72+udg7; T t62 = t14*t61; T t73 = t14*t22*udg2*udg9; T t63 = -t73+udg10; T t64 = t14*t63; T t65 = t62+t64; T t66 = odg1*param14; T t67 = t45*t48*t49*t51; T t68 = t66+t67; T t69 = t42+3.183097800805168E-4; T t70 = t39*t41*1.0E3; T t71 = t70+3.183097800805168E-1; T t74 = t65*t68; T t75 = t14*udg2*udg3; T t76 = t74+t75; T t77 = t14*udg4; T t78 = t14*t44; T t79 = t77+t78; T t80 = t53-t14*t32*2.0; T t81 = odg1*param13; T t82 = 1.0/param3; T t83 = param1*t68*t82; T t84 = t81+t83; T t85 = atan(t71); T t86 = t2*t85; T t87 = t69*t69; T t88 = t87*1.0E6; T t89 = t88+1.0; T t90 = 1.0/t89; T t91 = t2*t71*t90; T t92 = t86+t91+1.0/2.0; T t93 = 1.0/t30; f[0*ng+i] = udg2; f[1*ng+i] = t42+t56+t14*t24-t59*t60+1.318309780080517E-3; f[2*ng+i] = t76; f[3*ng+i] = t79*udg2+t14*udg2*(t56-t59*t60)-t23*t84*t93*(t22*t44*udg5+t13*t30*t92*(-udg8+t13*(t23*t31*udg2+t23*t61*udg3)+t22*t28*udg5))+t14*t65*t68*udg3; f[4*ng+i] = udg3; f[5*ng+i] = t76; f[6*ng+i] = t42+t56+t14*t26-t59*t80+1.318309780080517E-3; f[7*ng+i] = t79*udg3+t14*udg3*(t56-t59*t80)-t23*t84*t93*(t22*t44*udg9+t13*t30*t92*(-udg12+t13*(t23*t32*udg3+t23*t63*udg2)+t22*t28*udg9))+t14*t65*t68*udg2; i += blockDim.x * gridDim.x; } } template <typename T> void gpuFlux(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uinf, T *param, T time, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw) { int blockDim = 256; int gridDim = (ng + blockDim - 1) / blockDim; gridDim = (gridDim>1024)? 
1024 : gridDim; kernelgpuFlux<<<gridDim, blockDim>>>(f, xdg, udg, odg, wdg, uinf, param, time, ng, nc, ncu, nd, ncx, nco, ncw); } template void gpuFlux(double *, double *, double *, double *, double *, double *, double *, double, int, int, int, int, int, int, int); template void gpuFlux(float *, float *, float *, float *, float *, float *, float *, float, int, int, int, int, int, int, int);
ec8a332544ba20392f2add0442d950984695558a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> int main(int argc, char *argv[]) { int a; int b; int *ptx; int *pty; int *pttmp; printf("Pointer Example Program : Print Pointer Address\n"); a = 10; b = 11; ptx = &a; pty = &b; printf("\n[ptx ]:Value of ptx = %p", ptx); printf("\n[ptx ]:Value of pty = %p", pty); pttmp = ptx; ptx = pty; pty = pttmp; // printf("\n[a ]:Value of A = %d", a); // printf("\n[*ptx]:Value of A = %d", *ptx); // printf("\n[&a ]:Address of A = %p", &a); // printf("\n[ptx ]:Address of A = %p", ptx); // printf("\n[&ptx]:Address of ptx = %p", &ptx); printf("\n[ptx ]:Value of ptx = %p", ptx); printf("\n[ptx ]:Value of pty = %p", pty); return 0; }
ec8a332544ba20392f2add0442d950984695558a.cu
#include <cuda.h> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> int main(int argc, char *argv[]) { int a; int b; int *ptx; int *pty; int *pttmp; printf("Pointer Example Program : Print Pointer Address\n"); a = 10; b = 11; ptx = &a; pty = &b; printf("\n[ptx ]:Value of ptx = %p", ptx); printf("\n[ptx ]:Value of pty = %p", pty); pttmp = ptx; ptx = pty; pty = pttmp; // printf("\n[a ]:Value of A = %d", a); // printf("\n[*ptx]:Value of A = %d", *ptx); // printf("\n[&a ]:Address of A = %p", &a); // printf("\n[ptx ]:Address of A = %p", ptx); // printf("\n[&ptx]:Address of ptx = %p", &ptx); printf("\n[ptx ]:Value of ptx = %p", ptx); printf("\n[ptx ]:Value of pty = %p", pty); return 0; }
013f9e991107ba92b3af80d6485c770e55655acb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sp_helper.h" #define THREADS_PER_BLOCK 512 #include <stdio.h> __host__ void CudaInitSpParams(superpixel_params* sp_params, const int s_std, const int i_std, const int nSPs, int nSPs_buffer, int nPixels){ int num_block = ceil( double(nSPs) / double(THREADS_PER_BLOCK) ); //Roy- TO Change dim3 ThreadPerBlock(THREADS_PER_BLOCK,1); dim3 BlockPerGrid(num_block,1); hipLaunchKernelGGL(( InitSpParams), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, sp_params,s_std,i_std,nSPs,nSPs_buffer, nPixels); } __global__ void InitSpParams(superpixel_params* sp_params, const int s_std, const int i_std, const int nSPs, int nSPs_buffer, int nPixels) { // the label int k = threadIdx.x + blockIdx.x * blockDim.x; if (k>=nSPs_buffer) return; double s_std_square = double(s_std) * double(s_std); // calculate the inverse of covariance double3 sigma_s_local; sigma_s_local.x = 1.0/s_std_square; sigma_s_local.y = 0.0; sigma_s_local.z = 1.0/s_std_square; sp_params[k].sigma_s = sigma_s_local; sp_params[k].prior_count = nPixels/nSPs; if(k>=nSPs) { sp_params[k].count = 0; float3 mu_i; mu_i.x = -999; mu_i.y = -999; mu_i.z = -999; sp_params[k].mu_i = mu_i; double2 mu_s; mu_s.x = -999; mu_s.y = -999; sp_params[k].mu_s = mu_s; sp_params[k].valid = 0; } else sp_params[k].valid = 1; // calculate the log of the determinant of covariance sp_params[k].logdet_Sigma_s = log(s_std_square * s_std_square); } __host__ void CUDA_get_image_overlaid(uchar3* image, const bool* border, const int nPixels, const int xdim){ int num_block = ceil( double(nPixels) / double(THREADS_PER_BLOCK) ); dim3 ThreadPerBlock(THREADS_PER_BLOCK,1); dim3 BlockPerGrid(num_block,1); hipLaunchKernelGGL(( GetImageOverlaid), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, image, border, nPixels, xdim); } __global__ void GetImageOverlaid(uchar3* image, const bool* border, const int nPixels, const int xdim){ int t = threadIdx.x + blockIdx.x * 
blockDim.x; if (t>=nPixels) return; if (border[t]){ //change the color value to red uchar3 p; p.x = 0; p.y = 0; p.z = 255.0; image[t] = p; } } __host__ void CUDA_get_image_cartoon(uchar3* image_mean_gpu, const int* seg, const superpixel_params* sp_params, const int nPixels){ int num_block = ceil( double(nPixels) / double(THREADS_PER_BLOCK) ); dim3 ThreadPerBlock(THREADS_PER_BLOCK,1); dim3 BlockPerGrid(num_block,1); hipLaunchKernelGGL(( GetImageCartoon), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, image_mean_gpu, seg, sp_params, nPixels); } __global__ void GetImageCartoon(uchar3* image_mean_gpu, const int* seg, const superpixel_params* sp_params, const int nPixels){ //each each pixel, find the mu_i_lab; int t = threadIdx.x + blockIdx.x * blockDim.x; if (t>=nPixels) return; int k = seg[t]; // convert mu_i_lab to mu_i_rgb double L = double(sp_params[k].mu_i.x * (-100)); double La = double(sp_params[k].mu_i.y * 100); double Lb = double(sp_params[k].mu_i.z * 100); if (L!=L || La!=La || Lb!=Lb) return; //convert from LAB to XYZ double fy = (L+16) / 116; double fx = La/500 + fy; double fz = fy-Lb/200; double x,y,z; double xcube = powf(fx,3); double ycube = powf(fy,3); double zcube = powf(fz,3); if (ycube>0.008856) y = ycube; else y = (fy-16.0/116.0)/7.787; if (xcube>0.008856) x = xcube; else x = (fx - 16.0/116.0)/7.787; if (zcube>0.008856) z = zcube; else z = (fz - 16.0/116.0)/7.787; double X = 0.950456 * x; double Y = 1.000 * y; double Z = 1.088754 * z; //convert from XYZ to rgb double R = X * 3.2406 + Y * -1.5372 + Z * -0.4986; double G = X * -0.9689 + Y * 1.8758 + Z * 0.0415; double B = X * 0.0557 + Y * -0.2040 + Z * 1.0570; double r,g,b; if (R>0.0031308) r = 1.055 * (powf(R,(1.0/2.4))) - 0.055; else r = 12.92 * R; if (G>0.0031308) g = 1.055 * ( powf(G,(1.0/2.4))) - 0.055; else g= 12.92 * G; if (B>0.0031308) b = 1.055 * (powf(B, (1.0/2.4))) - 0.055; else b = 12.92 * B; uchar3 p; p.x = min(255.0, b * 255.0); p.y = min(255.0, g * 255.0); p.z = min(255.0, r * 255.0); 
p.x = max(0.0, double(p.x)); p.y = max(0.0, double(p.y)); p.z = max(0.0, double(p.z)); image_mean_gpu[t] = p; return; }
013f9e991107ba92b3af80d6485c770e55655acb.cu
#include "sp_helper.h" #define THREADS_PER_BLOCK 512 #include <stdio.h> __host__ void CudaInitSpParams(superpixel_params* sp_params, const int s_std, const int i_std, const int nSPs, int nSPs_buffer, int nPixels){ int num_block = ceil( double(nSPs) / double(THREADS_PER_BLOCK) ); //Roy- TO Change dim3 ThreadPerBlock(THREADS_PER_BLOCK,1); dim3 BlockPerGrid(num_block,1); InitSpParams<<<BlockPerGrid,ThreadPerBlock>>>(sp_params,s_std,i_std,nSPs,nSPs_buffer, nPixels); } __global__ void InitSpParams(superpixel_params* sp_params, const int s_std, const int i_std, const int nSPs, int nSPs_buffer, int nPixels) { // the label int k = threadIdx.x + blockIdx.x * blockDim.x; if (k>=nSPs_buffer) return; double s_std_square = double(s_std) * double(s_std); // calculate the inverse of covariance double3 sigma_s_local; sigma_s_local.x = 1.0/s_std_square; sigma_s_local.y = 0.0; sigma_s_local.z = 1.0/s_std_square; sp_params[k].sigma_s = sigma_s_local; sp_params[k].prior_count = nPixels/nSPs; if(k>=nSPs) { sp_params[k].count = 0; float3 mu_i; mu_i.x = -999; mu_i.y = -999; mu_i.z = -999; sp_params[k].mu_i = mu_i; double2 mu_s; mu_s.x = -999; mu_s.y = -999; sp_params[k].mu_s = mu_s; sp_params[k].valid = 0; } else sp_params[k].valid = 1; // calculate the log of the determinant of covariance sp_params[k].logdet_Sigma_s = log(s_std_square * s_std_square); } __host__ void CUDA_get_image_overlaid(uchar3* image, const bool* border, const int nPixels, const int xdim){ int num_block = ceil( double(nPixels) / double(THREADS_PER_BLOCK) ); dim3 ThreadPerBlock(THREADS_PER_BLOCK,1); dim3 BlockPerGrid(num_block,1); GetImageOverlaid<<<BlockPerGrid,ThreadPerBlock>>>(image, border, nPixels, xdim); } __global__ void GetImageOverlaid(uchar3* image, const bool* border, const int nPixels, const int xdim){ int t = threadIdx.x + blockIdx.x * blockDim.x; if (t>=nPixels) return; if (border[t]){ //change the color value to red uchar3 p; p.x = 0; p.y = 0; p.z = 255.0; image[t] = p; } } __host__ void 
CUDA_get_image_cartoon(uchar3* image_mean_gpu, const int* seg, const superpixel_params* sp_params, const int nPixels){ int num_block = ceil( double(nPixels) / double(THREADS_PER_BLOCK) ); dim3 ThreadPerBlock(THREADS_PER_BLOCK,1); dim3 BlockPerGrid(num_block,1); GetImageCartoon<<<BlockPerGrid,ThreadPerBlock>>>(image_mean_gpu, seg, sp_params, nPixels); } __global__ void GetImageCartoon(uchar3* image_mean_gpu, const int* seg, const superpixel_params* sp_params, const int nPixels){ //each each pixel, find the mu_i_lab; int t = threadIdx.x + blockIdx.x * blockDim.x; if (t>=nPixels) return; int k = seg[t]; // convert mu_i_lab to mu_i_rgb double L = double(sp_params[k].mu_i.x * (-100)); double La = double(sp_params[k].mu_i.y * 100); double Lb = double(sp_params[k].mu_i.z * 100); if (L!=L || La!=La || Lb!=Lb) return; //convert from LAB to XYZ double fy = (L+16) / 116; double fx = La/500 + fy; double fz = fy-Lb/200; double x,y,z; double xcube = powf(fx,3); double ycube = powf(fy,3); double zcube = powf(fz,3); if (ycube>0.008856) y = ycube; else y = (fy-16.0/116.0)/7.787; if (xcube>0.008856) x = xcube; else x = (fx - 16.0/116.0)/7.787; if (zcube>0.008856) z = zcube; else z = (fz - 16.0/116.0)/7.787; double X = 0.950456 * x; double Y = 1.000 * y; double Z = 1.088754 * z; //convert from XYZ to rgb double R = X * 3.2406 + Y * -1.5372 + Z * -0.4986; double G = X * -0.9689 + Y * 1.8758 + Z * 0.0415; double B = X * 0.0557 + Y * -0.2040 + Z * 1.0570; double r,g,b; if (R>0.0031308) r = 1.055 * (powf(R,(1.0/2.4))) - 0.055; else r = 12.92 * R; if (G>0.0031308) g = 1.055 * ( powf(G,(1.0/2.4))) - 0.055; else g= 12.92 * G; if (B>0.0031308) b = 1.055 * (powf(B, (1.0/2.4))) - 0.055; else b = 12.92 * B; uchar3 p; p.x = min(255.0, b * 255.0); p.y = min(255.0, g * 255.0); p.z = min(255.0, r * 255.0); p.x = max(0.0, double(p.x)); p.y = max(0.0, double(p.y)); p.z = max(0.0, double(p.z)); image_mean_gpu[t] = p; return; }
a73310531fb8ffb3c8f52872bbac5cfdeb73e158.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2013, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "telescope/station/oskar_evaluate_vla_beam_pbcor_cuda.h" #include "telescope/station/oskar_vla_pbcor_inline.h" #ifdef __cplusplus extern "C" { #endif /* Kernel wrappers. ======================================================== */ /* Single precision. 
*/ void oskar_evaluate_vla_beam_pbcor_cuda_f(float* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } void oskar_evaluate_vla_beam_pbcor_complex_cuda_f(float2* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_complex_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } void oskar_evaluate_vla_beam_pbcor_matrix_cuda_f(float4c* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_matrix_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } /* Double precision. 
*/ void oskar_evaluate_vla_beam_pbcor_cuda_d(double* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } void oskar_evaluate_vla_beam_pbcor_complex_cuda_d(double2* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_complex_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } void oskar_evaluate_vla_beam_pbcor_matrix_cuda_d(double4c* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_matrix_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } #ifdef __cplusplus } #endif /* Kernels. ================================================================ */ /* Single precision. 
*/ __global__ void oskar_evaluate_vla_beam_pbcor_cudak_f(float* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; beam[i] = oskar_vla_pbcor_inline_f(l[i], m[i], freq_ghz, p1, p2, p3); } __global__ void oskar_evaluate_vla_beam_pbcor_complex_cudak_f(float2* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; beam[i].x = oskar_vla_pbcor_inline_f(l[i], m[i], freq_ghz, p1, p2, p3); beam[i].y = 0.0f; } __global__ void oskar_evaluate_vla_beam_pbcor_matrix_cudak_f(float4c* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { float t; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; t = oskar_vla_pbcor_inline_f(l[i], m[i], freq_ghz, p1, p2, p3); beam[i].a.x = t; beam[i].a.y = 0.0f; beam[i].b.x = 0.0f; beam[i].b.y = 0.0f; beam[i].c.x = 0.0f; beam[i].c.y = 0.0f; beam[i].d.x = t; beam[i].d.y = 0.0f; } /* Double precision. 
*/ __global__ void oskar_evaluate_vla_beam_pbcor_cudak_d(double* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; beam[i] = oskar_vla_pbcor_inline_d(l[i], m[i], freq_ghz, p1, p2, p3); } __global__ void oskar_evaluate_vla_beam_pbcor_complex_cudak_d(double2* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; beam[i].x = oskar_vla_pbcor_inline_d(l[i], m[i], freq_ghz, p1, p2, p3); beam[i].y = 0.0; } __global__ void oskar_evaluate_vla_beam_pbcor_matrix_cudak_d(double4c* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { double t; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; t = oskar_vla_pbcor_inline_d(l[i], m[i], freq_ghz, p1, p2, p3); beam[i].a.x = t; beam[i].a.y = 0.0; beam[i].b.x = 0.0; beam[i].b.y = 0.0; beam[i].c.x = 0.0; beam[i].c.y = 0.0; beam[i].d.x = t; beam[i].d.y = 0.0; }
a73310531fb8ffb3c8f52872bbac5cfdeb73e158.cu
/* * Copyright (c) 2013, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "telescope/station/oskar_evaluate_vla_beam_pbcor_cuda.h" #include "telescope/station/oskar_vla_pbcor_inline.h" #ifdef __cplusplus extern "C" { #endif /* Kernel wrappers. ======================================================== */ /* Single precision. 
*/ void oskar_evaluate_vla_beam_pbcor_cuda_f(float* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } void oskar_evaluate_vla_beam_pbcor_complex_cuda_f(float2* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_complex_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } void oskar_evaluate_vla_beam_pbcor_matrix_cuda_f(float4c* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_matrix_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } /* Double precision. 
*/ void oskar_evaluate_vla_beam_pbcor_cuda_d(double* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } void oskar_evaluate_vla_beam_pbcor_complex_cuda_d(double2* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_complex_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } void oskar_evaluate_vla_beam_pbcor_matrix_cuda_d(double4c* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_evaluate_vla_beam_pbcor_matrix_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (beam, num_sources, l, m, freq_ghz, p1, p2, p3); } #ifdef __cplusplus } #endif /* Kernels. ================================================================ */ /* Single precision. 
*/ __global__ void oskar_evaluate_vla_beam_pbcor_cudak_f(float* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; beam[i] = oskar_vla_pbcor_inline_f(l[i], m[i], freq_ghz, p1, p2, p3); } __global__ void oskar_evaluate_vla_beam_pbcor_complex_cudak_f(float2* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; beam[i].x = oskar_vla_pbcor_inline_f(l[i], m[i], freq_ghz, p1, p2, p3); beam[i].y = 0.0f; } __global__ void oskar_evaluate_vla_beam_pbcor_matrix_cudak_f(float4c* beam, int num_sources, const float* l, const float* m, const float freq_ghz, const float p1, const float p2, const float p3) { float t; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; t = oskar_vla_pbcor_inline_f(l[i], m[i], freq_ghz, p1, p2, p3); beam[i].a.x = t; beam[i].a.y = 0.0f; beam[i].b.x = 0.0f; beam[i].b.y = 0.0f; beam[i].c.x = 0.0f; beam[i].c.y = 0.0f; beam[i].d.x = t; beam[i].d.y = 0.0f; } /* Double precision. 
*/ __global__ void oskar_evaluate_vla_beam_pbcor_cudak_d(double* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; beam[i] = oskar_vla_pbcor_inline_d(l[i], m[i], freq_ghz, p1, p2, p3); } __global__ void oskar_evaluate_vla_beam_pbcor_complex_cudak_d(double2* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; beam[i].x = oskar_vla_pbcor_inline_d(l[i], m[i], freq_ghz, p1, p2, p3); beam[i].y = 0.0; } __global__ void oskar_evaluate_vla_beam_pbcor_matrix_cudak_d(double4c* beam, int num_sources, const double* l, const double* m, const double freq_ghz, const double p1, const double p2, const double p3) { double t; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_sources) return; t = oskar_vla_pbcor_inline_d(l[i], m[i], freq_ghz, p1, p2, p3); beam[i].a.x = t; beam[i].a.y = 0.0; beam[i].b.x = 0.0; beam[i].b.y = 0.0; beam[i].c.x = 0.0; beam[i].c.y = 0.0; beam[i].d.x = t; beam[i].d.y = 0.0; }
3ebf38eba46c1427c7ed59a68a9478749b83b0ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unistd.h> #include <sys/time.h> #include "svt_utils.h" #include <math.h> #include <sched.h> #include "semaphore.c" #include <thrust/device_vector.h> // global variables int VERBOSE = 0; int TIMER = 0; // CUDA timer macros hipEvent_t c_start, c_stop; inline void start_time() { if ( TIMER ) { hipEventCreate(&c_start); hipEventCreate(&c_stop); hipEventRecord(c_start, 0); } } inline float stop_time(const char *msg) { float elapsedTime = 0; if ( TIMER ) { hipEventRecord(c_stop, 0); hipEventSynchronize(c_stop); hipEventElapsedTime(&elapsedTime, c_start, c_stop); if ( VERBOSE ) printf("Time to %s: %.3f ms\n", msg, elapsedTime); } return elapsedTime; } // calculate mean and stdev on an array of count floats void get_mean(float *times_array, int count, float *mean, float *stdev) { int j; float sum = 0; float sumsqr = 0; *mean = *stdev = 0; for (j=0; j < count; j++) { sum += times_array[j]; sumsqr += pow(times_array[j],2); } *mean = sum/(float)count; *stdev = sqrt(abs((sumsqr/(float)count) - pow(*mean,2))); } __global__ void init_arrays_GPU (fout_arrays* fout_dev, evt_arrays* evt_dev, int* events ) { int ie, ir, ip; *events = 0; ie = blockIdx.x; // events index ir = blockIdx.y; // roads index ip = threadIdx.x; // NSVX_PLANE+1 // initialize evt arrays.... evt_dev->evt_nroads[ie] = 0; evt_dev->evt_ee_word[ie] = 0; evt_dev->evt_err_sum[ie] =0; evt_dev->evt_zid[ie][ir] = 0; evt_dev->evt_err[ie][ir] = 0; evt_dev->evt_cable_sect[ie][ir] = 0; evt_dev->evt_sect[ie][ir] = 0; evt_dev->evt_road[ie][ir] = 0; evt_dev->evt_nhits[ie][ir][ip] = 0; // initialize fout arrays.... 
fout_dev->fout_ntrks[ie] = 0; fout_dev->fout_parity[ie] = 0; fout_dev->fout_ee_word[ie] = 0; fout_dev->fout_err_sum[ie] = 0; fout_dev->fout_cdferr[ie] = 0; fout_dev->fout_svterr[ie] = 0; } void setedata_GPU(tf_arrays_t tf, struct extra_data *edata_dev) { int len; len = SVTSIM_NBAR * FITBLOCK * sizeof(int); MY_CUDA_CHECK(hipMemcpy(edata_dev->whichFit, tf->whichFit, len, hipMemcpyHostToDevice)); len = NFITPAR * (DIMSPA+1) * SVTSIM_NBAR * FITBLOCK * sizeof(long long int); MY_CUDA_CHECK(hipMemcpy(edata_dev->lfitparfcon, tf->lfitparfcon, len, hipMemcpyHostToDevice)); len = NEVTS * sizeof(int); MY_CUDA_CHECK(hipMemcpy(edata_dev->wedge, tf->wedge, len, hipMemcpyHostToDevice)); } int svt_GPU(tf_arrays_t tf, struct extra_data *edata_dev, unsigned int *data_in, int n_words, float *timer, int nothrust, unsigned int* dataout) { int tEvts=0; int ndata=0; dim3 blocks(NEVTS,MAXROAD); start_time(); // Cuda Malloc int* d_tEvts; MY_CUDA_CHECK(hipMalloc((void**)&d_tEvts, sizeof(int))); int* ndata_dev; MY_CUDA_CHECK(hipMalloc((void**)&ndata_dev, sizeof(int))); struct evt_arrays* evt_dev; MY_CUDA_CHECK(hipMalloc((void**)&evt_dev, sizeof(evt_arrays))); struct fep_arrays *fep_dev; MY_CUDA_CHECK(hipMalloc((void**)&fep_dev, sizeof(fep_arrays))); struct fit_arrays *fit_dev; MY_CUDA_CHECK(hipMalloc((void**)&fit_dev, sizeof(fit_arrays))); struct fout_arrays *fout_dev; MY_CUDA_CHECK(hipMalloc((void**)&fout_dev, sizeof(fout_arrays))); unsigned int *dataout_dev; MY_CUDA_CHECK(hipMalloc((void**)&dataout_dev, n_words*sizeof(int))); // initialize structures hipLaunchKernelGGL(( init_arrays_GPU), dim3(blocks), dim3(NSVX_PLANE+1), 0, 0, fout_dev, evt_dev, d_tEvts); if ( nothrust ) { // use pure cuda version of unpack unsigned int *d_data_in; long sizeW = sizeof(int) * n_words; hipMalloc((void **)&d_data_in, sizeW); hipMemcpy(d_data_in, data_in, sizeW, hipMemcpyHostToDevice); timer[0] = stop_time("input copy and initialize"); start_time(); gf_unpack_cuda_GPU(d_data_in, n_words, evt_dev, d_tEvts ); 
hipFree(d_data_in); } else { // use thrust version of unpack thrust::device_vector<unsigned int> d_vec(n_words+1); d_vec[0] = 0; thrust::copy(data_in, data_in + n_words, d_vec.begin()+1); timer[0] = stop_time("input copy and initialize"); start_time(); gf_unpack_thrust_GPU(d_vec, n_words, evt_dev, d_tEvts ); } timer[1] = stop_time("input unpack"); MY_CUDA_CHECK(hipMemcpy(&tEvts, d_tEvts, sizeof(int), hipMemcpyDeviceToHost)); tf->totEvts = tEvts; // Fep comb and set start_time(); gf_fep_GPU( evt_dev, fep_dev, tEvts ); timer[2] =stop_time("compute fep combinations"); // Fit and set Fout start_time(); gf_fit_GPU(fep_dev, evt_dev, edata_dev, fit_dev, fout_dev, tEvts, dataout_dev, ndata_dev); timer[3] = stop_time("fit data and set output"); // Output copy DtoH start_time(); MY_CUDA_CHECK(hipMemcpy(&ndata, ndata_dev, sizeof(int), hipMemcpyDeviceToHost)); MY_CUDA_CHECK(hipMemcpy(dataout, dataout_dev, ndata * sizeof(int), hipMemcpyDeviceToHost)); MY_CUDA_CHECK( hipFree(evt_dev) ); MY_CUDA_CHECK( hipFree(fep_dev) ); MY_CUDA_CHECK( hipFree(fit_dev) ); MY_CUDA_CHECK( hipFree(fout_dev)); MY_CUDA_CHECK( hipFree(d_tEvts)); MY_CUDA_CHECK( hipFree(ndata_dev)); MY_CUDA_CHECK( hipFree(dataout_dev)); timer[4] = stop_time("copy output (DtoH)"); return ndata; } void help(char* prog) { printf("Use %s [-i fileIn] [-o fileOut] [-s cpu || gpu] [-l #loops] [-u] [-v] [-t] [-p priority] [-h] \n\n", prog); printf(" -i fileIn Input file (Default: hbout_w6_100evts).\n"); printf(" -o fileOut Output file (Default: gfout.txt).\n"); printf(" -s cpu || gpu Switch between CPU or GPU version (Default: gpu).\n"); printf(" -l loops Number of executions (Default: 1).\n"); printf(" -u Use pure cuda version for unpack (Default: use thrust version).\n"); printf(" -v Print verbose messages.\n"); printf(" -t Calculate timing.\n"); printf(" -p priority Set scheduling priority to <priority> and cpu affinity - you nedd to be ROOT - (Default: disable).\n"); printf(" -h This help.\n"); } int main(int argc, char* 
argv[]) { int c; char* fileIn = "hbout_w6_100evts"; char* fileOut = "gfout.txt"; char* where = "gpu"; int N_LOOPS = 1; int PRIORITY = 0; int NOTHRUST = 0; while ( (c = getopt(argc, argv, "i:s:o:l:uvtp:h")) != -1 ) { switch(c) { case 'i': fileIn = optarg; break; case 'o': fileOut = optarg; break; case 's': where = optarg; break; case 'l': N_LOOPS = atoi(optarg); break; case 'v': VERBOSE = 1; break; case 'u': NOTHRUST = 1; break; case 't': TIMER = 1; break; case 'p': PRIORITY = atoi(optarg); break; case 'h': help(argv[0]); return 0; } } if (access(fileIn, 0) == -1) { printf("ERROR: File %s doesn't exist.\n", fileIn); return 1; } int semid; if ( PRIORITY ) { // lock control so no one else can run at the same time and crash the machine key_t key = (key_t) 0xdeadface; if ((semid = initsem(key, 1)) == -1) { perror("initsem"); exit(1); } printf("Trying to gain control...\n"); lock(semid); // set scheduling priority & CPU affinity struct sched_param p; p.sched_priority = PRIORITY; if (sched_setscheduler(0, SCHED_FIFO, &p)) { perror("setscheduler"); return -1; } if (sched_getparam(0, &p) == 0) printf("Running with scheduling priority = %d\n", p.sched_priority); unsigned long mask; if (sched_getaffinity(0, sizeof(mask), (cpu_set_t*)&mask) < 0) { perror("sched_getaffinity"); } printf("my affinity mask is: %08lx\n", mask); mask = 1; // processor 1 only if (sched_setaffinity(0, sizeof(mask), (cpu_set_t*)&mask) < 0) { perror("sched_setaffinity"); return -1; } if (sched_getaffinity(0, sizeof(mask), (cpu_set_t*)&mask) < 0) { perror("sched_getaffinity"); } printf("my affinity mask is: %08lx\n", mask); } // Do we want to skip the first "skip" runs from mean calculation? 
int skip = 0; int n_iters = N_LOOPS+skip; float initg = 0; float fcon = 0; float timerange = 0; float ptime[5]; float ptime_cpu[3]; float times_array[6][N_LOOPS]; float times_array_cpu[4][N_LOOPS]; struct timeval time_start, time_stop; struct timeval tBegin, tEnd; struct timeval ptBegin, ptEnd; if ( strcmp(where,"gpu") == 0 ) { // GPU if ( TIMER ) gettimeofday(&tBegin, NULL); // this is just to measure time to initialize GPU hipEvent_t init; MY_CUDA_CHECK( hipEventCreate( &init ) ); if ( TIMER ) { gettimeofday(&tEnd, NULL); initg = ((tEnd.tv_usec + 1000000 * tEnd.tv_sec) - (tBegin.tv_usec + 1000000 * tBegin.tv_sec))/1000000.0; } } // read input file FILE* hbout = fopen(fileIn,"r"); if ( hbout == NULL ) { printf("ERROR: Cannot open input file\n"); exit(1); } unsigned int hexaval; unsigned int *data_send = (unsigned int*)malloc(2500000*sizeof(unsigned)); if ( data_send == (unsigned int*) NULL ) { perror("malloc"); return 2; } char word[16]; int k=0; // number of words read if ( VERBOSE ) printf("Reading input file %s... ", fileIn); while (fscanf(hbout, "%s", word) != EOF) { hexaval = strtol(word,NULL,16); data_send[k] = hexaval; k++; } fclose(hbout); int outword; unsigned int *dataout = (unsigned int*)malloc(k*sizeof(unsigned)); tf_arrays_t tf; gf_init(&tf); svtsim_fconread(tf); if ( TIMER ) gettimeofday(&tBegin, NULL); struct extra_data *edata_dev; if ( strcmp(where,"cpu") != 0 ) { // GPU if ( TIMER ) start_time(); MY_CUDA_CHECK(hipMalloc((void**)&edata_dev, sizeof(struct extra_data))); setedata_GPU(tf, edata_dev); if ( TIMER ) fcon = stop_time("Copy detector configuration data"); } while (n_iters--) { if ( strcmp(where,"cpu") == 0 ) { // CPU if ( TIMER ) gettimeofday(&time_start, NULL); if ( VERBOSE ) printf("Start working on CPU..... 
\n"); if ( TIMER ) gettimeofday(&ptBegin, NULL); gf_fep_unpack(tf, k, data_send); if ( TIMER) { gettimeofday(&ptEnd, NULL); timerange = ((ptEnd.tv_usec + 1000000 * ptEnd.tv_sec) - (ptBegin.tv_usec + 1000000 * ptBegin.tv_sec))/1000.0; if ( VERBOSE ) printf("Time to CPU unpack: %.3f ms\n", timerange); ptime_cpu[0] = timerange; gettimeofday(&ptBegin, NULL); } gf_fep_comb(tf); if ( TIMER) { gettimeofday(&ptEnd, NULL); timerange = ((ptEnd.tv_usec + 1000000 * ptEnd.tv_sec) - (ptBegin.tv_usec + 1000000 * ptBegin.tv_sec))/1000.0; if ( VERBOSE ) printf("Time to CPU comb: %.3f ms\n", timerange); ptime_cpu[1] = timerange; gettimeofday(&ptBegin, NULL); } gf_fit(tf); gf_comparator(tf); if ( TIMER) { gettimeofday(&ptEnd, NULL); timerange = ((ptEnd.tv_usec + 1000000 * ptEnd.tv_sec) - (ptBegin.tv_usec + 1000000 * ptBegin.tv_sec))/1000.0; if ( VERBOSE ) printf("Time to CPU fit: %.3f ms\n", timerange); ptime_cpu[2] = timerange; gettimeofday(&time_stop, NULL); } if ( VERBOSE ) printf(".... fits %d events! 
\n", tf->totEvts); } else { // GPU if ( VERBOSE ) printf("Start working on GPU...\n"); if ( TIMER ) gettimeofday(&time_start, NULL); outword = svt_GPU(tf, edata_dev, data_send, k, ptime, NOTHRUST, dataout); if ( TIMER ) gettimeofday(&time_stop, NULL); } if ( TIMER ) { if ( n_iters < N_LOOPS ) { // skip the first "skip" iterations timerange = ((time_stop.tv_usec + 1000000 * time_stop.tv_sec) - (time_start.tv_usec + 1000000 * time_start.tv_sec))/1000.0; if ( strcmp(where,"cpu") != 0 ) { // GPU times_array[0][n_iters] = timerange; for (int t=1; t < 6; ++t) times_array[t][n_iters] = ptime[t-1]; } else { //CPU times_array_cpu[0][n_iters] = timerange; for (int t=1; t < 4; ++t) times_array_cpu[t][n_iters] = ptime_cpu[t-1]; } } } } // end iterations if ( strcmp(where,"cpu") != 0 ) { MY_CUDA_CHECK(hipFree(edata_dev)); } if ( TIMER ) { gettimeofday(&tEnd, NULL); timerange = ((tEnd.tv_usec + 1000000 * tEnd.tv_sec) - (tBegin.tv_usec + 1000000 * tBegin.tv_sec))/1000.0; if ( VERBOSE ) printf("Time to complete all: %.3f ms\n", timerange); } // write output file FILE* OUTCHECK = fopen(fileOut, "w"); if ( strcmp(where,"cpu") == 0 ) // CPU for (int i=0; i < tf->out->ndata; i++) fprintf(OUTCHECK,"%.6x\n", tf->out->data[i]); else // GPU for (int i=0; i < outword; i++) fprintf(OUTCHECK,"%.6x\n", dataout[i]); fclose(OUTCHECK); // write file with times if ( TIMER ) { char fileTimes[1024]; FILE *ft; if ( strcmp(where,"cpu") != 0 ) { // GPU float mean[6]; float stdev[6]; for (int t=0; t < 6; ++t) get_mean(times_array[t], N_LOOPS, &mean[t], &stdev[t]); sprintf(fileTimes, "ListTimesGPU-Evts_%d_Loops_%d.txt", NEVTS, N_LOOPS); ft = fopen(fileTimes, "w"); fprintf(ft,"# #NEvts: %d, Loops: %d, mean: %.3f ms, stdev: %.3f ms\n", NEVTS, N_LOOPS, mean[0], stdev[0]); fprintf(ft,"# initialize GPU: %.3f ms; copy detector configuration data: %.3f ms\n", initg, fcon); fprintf(ft,"# input copy and initialize --> mean: %.3f ms, stdev: %.3f ms\n", mean[1], stdev[1]); fprintf(ft,"# input unpack --> mean: %.3f 
ms, stdev: %.3f ms\n", mean[2], stdev[2]); fprintf(ft,"# compute fep combinations --> mean: %.3f ms, stdev: %.3f ms\n", mean[3], stdev[3]); fprintf(ft,"# fit data and set output --> mean: %.3f ms, stdev: %.3f ms\n", mean[4], stdev[4]); fprintf(ft,"# copy output (DtoH) --> mean: %.3f ms, stdev: %.3f ms\n", mean[5], stdev[5]); for (int j=0 ; j < (N_LOOPS); j++) { for (int t=0; t < 6; ++t) fprintf(ft,"%.3f ",times_array[t][j]); fprintf(ft,"\n"); } } else { // CPU float mean[4]; float stdev[4]; for (int t=0; t < 4; ++t) get_mean(times_array_cpu[t], N_LOOPS, &mean[t], &stdev[t]); sprintf(fileTimes, "ListTimesCPU-Evts_%d_Loops_%d.txt", NEVTS, N_LOOPS); ft = fopen(fileTimes, "w"); fprintf(ft,"# #NEvts: %d, Loops: %d, mean: %.3f ms, stdev: %.3f ms\n", NEVTS, N_LOOPS, mean[0], stdev[0]); fprintf(ft,"# input unpack --> mean: %.3f ms, stdev: %.3f ms\n", mean[1], stdev[1]); fprintf(ft,"# compute fep combinations --> mean: %.3f ms, stdev: %.3f ms\n", mean[2], stdev[2]); fprintf(ft,"# fit data and set output --> mean: %.3f ms, stdev: %.3f ms\n", mean[3], stdev[3]); for (int j=0 ; j < (N_LOOPS); j++) { for (int t=0; t < 4; ++t) fprintf(ft,"%.3f ",times_array_cpu[t][j]); fprintf(ft,"\n"); } } fclose(ft); printf("All done. See %s for timing.\n", fileTimes); } if ( PRIORITY ) { if ( VERBOSE ) printf("Unlocking control...\n"); unlock(semid); } free(data_send); free(tf); return 0; }
3ebf38eba46c1427c7ed59a68a9478749b83b0ac.cu
#include <unistd.h> #include <sys/time.h> #include "svt_utils.h" #include <math.h> #include <sched.h> #include "semaphore.c" #include <thrust/device_vector.h> // global variables int VERBOSE = 0; int TIMER = 0; // CUDA timer macros cudaEvent_t c_start, c_stop; inline void start_time() { if ( TIMER ) { cudaEventCreate(&c_start); cudaEventCreate(&c_stop); cudaEventRecord(c_start, 0); } } inline float stop_time(const char *msg) { float elapsedTime = 0; if ( TIMER ) { cudaEventRecord(c_stop, 0); cudaEventSynchronize(c_stop); cudaEventElapsedTime(&elapsedTime, c_start, c_stop); if ( VERBOSE ) printf("Time to %s: %.3f ms\n", msg, elapsedTime); } return elapsedTime; } // calculate mean and stdev on an array of count floats void get_mean(float *times_array, int count, float *mean, float *stdev) { int j; float sum = 0; float sumsqr = 0; *mean = *stdev = 0; for (j=0; j < count; j++) { sum += times_array[j]; sumsqr += pow(times_array[j],2); } *mean = sum/(float)count; *stdev = sqrt(abs((sumsqr/(float)count) - pow(*mean,2))); } __global__ void init_arrays_GPU (fout_arrays* fout_dev, evt_arrays* evt_dev, int* events ) { int ie, ir, ip; *events = 0; ie = blockIdx.x; // events index ir = blockIdx.y; // roads index ip = threadIdx.x; // NSVX_PLANE+1 // initialize evt arrays.... evt_dev->evt_nroads[ie] = 0; evt_dev->evt_ee_word[ie] = 0; evt_dev->evt_err_sum[ie] =0; evt_dev->evt_zid[ie][ir] = 0; evt_dev->evt_err[ie][ir] = 0; evt_dev->evt_cable_sect[ie][ir] = 0; evt_dev->evt_sect[ie][ir] = 0; evt_dev->evt_road[ie][ir] = 0; evt_dev->evt_nhits[ie][ir][ip] = 0; // initialize fout arrays.... 
fout_dev->fout_ntrks[ie] = 0; fout_dev->fout_parity[ie] = 0; fout_dev->fout_ee_word[ie] = 0; fout_dev->fout_err_sum[ie] = 0; fout_dev->fout_cdferr[ie] = 0; fout_dev->fout_svterr[ie] = 0; } void setedata_GPU(tf_arrays_t tf, struct extra_data *edata_dev) { int len; len = SVTSIM_NBAR * FITBLOCK * sizeof(int); MY_CUDA_CHECK(cudaMemcpy(edata_dev->whichFit, tf->whichFit, len, cudaMemcpyHostToDevice)); len = NFITPAR * (DIMSPA+1) * SVTSIM_NBAR * FITBLOCK * sizeof(long long int); MY_CUDA_CHECK(cudaMemcpy(edata_dev->lfitparfcon, tf->lfitparfcon, len, cudaMemcpyHostToDevice)); len = NEVTS * sizeof(int); MY_CUDA_CHECK(cudaMemcpy(edata_dev->wedge, tf->wedge, len, cudaMemcpyHostToDevice)); } int svt_GPU(tf_arrays_t tf, struct extra_data *edata_dev, unsigned int *data_in, int n_words, float *timer, int nothrust, unsigned int* dataout) { int tEvts=0; int ndata=0; dim3 blocks(NEVTS,MAXROAD); start_time(); // Cuda Malloc int* d_tEvts; MY_CUDA_CHECK(cudaMalloc((void**)&d_tEvts, sizeof(int))); int* ndata_dev; MY_CUDA_CHECK(cudaMalloc((void**)&ndata_dev, sizeof(int))); struct evt_arrays* evt_dev; MY_CUDA_CHECK(cudaMalloc((void**)&evt_dev, sizeof(evt_arrays))); struct fep_arrays *fep_dev; MY_CUDA_CHECK(cudaMalloc((void**)&fep_dev, sizeof(fep_arrays))); struct fit_arrays *fit_dev; MY_CUDA_CHECK(cudaMalloc((void**)&fit_dev, sizeof(fit_arrays))); struct fout_arrays *fout_dev; MY_CUDA_CHECK(cudaMalloc((void**)&fout_dev, sizeof(fout_arrays))); unsigned int *dataout_dev; MY_CUDA_CHECK(cudaMalloc((void**)&dataout_dev, n_words*sizeof(int))); // initialize structures init_arrays_GPU<<<blocks, NSVX_PLANE+1>>>(fout_dev, evt_dev, d_tEvts); if ( nothrust ) { // use pure cuda version of unpack unsigned int *d_data_in; long sizeW = sizeof(int) * n_words; cudaMalloc((void **)&d_data_in, sizeW); cudaMemcpy(d_data_in, data_in, sizeW, cudaMemcpyHostToDevice); timer[0] = stop_time("input copy and initialize"); start_time(); gf_unpack_cuda_GPU(d_data_in, n_words, evt_dev, d_tEvts ); cudaFree(d_data_in); } 
else { // use thrust version of unpack thrust::device_vector<unsigned int> d_vec(n_words+1); d_vec[0] = 0; thrust::copy(data_in, data_in + n_words, d_vec.begin()+1); timer[0] = stop_time("input copy and initialize"); start_time(); gf_unpack_thrust_GPU(d_vec, n_words, evt_dev, d_tEvts ); } timer[1] = stop_time("input unpack"); MY_CUDA_CHECK(cudaMemcpy(&tEvts, d_tEvts, sizeof(int), cudaMemcpyDeviceToHost)); tf->totEvts = tEvts; // Fep comb and set start_time(); gf_fep_GPU( evt_dev, fep_dev, tEvts ); timer[2] =stop_time("compute fep combinations"); // Fit and set Fout start_time(); gf_fit_GPU(fep_dev, evt_dev, edata_dev, fit_dev, fout_dev, tEvts, dataout_dev, ndata_dev); timer[3] = stop_time("fit data and set output"); // Output copy DtoH start_time(); MY_CUDA_CHECK(cudaMemcpy(&ndata, ndata_dev, sizeof(int), cudaMemcpyDeviceToHost)); MY_CUDA_CHECK(cudaMemcpy(dataout, dataout_dev, ndata * sizeof(int), cudaMemcpyDeviceToHost)); MY_CUDA_CHECK( cudaFree(evt_dev) ); MY_CUDA_CHECK( cudaFree(fep_dev) ); MY_CUDA_CHECK( cudaFree(fit_dev) ); MY_CUDA_CHECK( cudaFree(fout_dev)); MY_CUDA_CHECK( cudaFree(d_tEvts)); MY_CUDA_CHECK( cudaFree(ndata_dev)); MY_CUDA_CHECK( cudaFree(dataout_dev)); timer[4] = stop_time("copy output (DtoH)"); return ndata; } void help(char* prog) { printf("Use %s [-i fileIn] [-o fileOut] [-s cpu || gpu] [-l #loops] [-u] [-v] [-t] [-p priority] [-h] \n\n", prog); printf(" -i fileIn Input file (Default: hbout_w6_100evts).\n"); printf(" -o fileOut Output file (Default: gfout.txt).\n"); printf(" -s cpu || gpu Switch between CPU or GPU version (Default: gpu).\n"); printf(" -l loops Number of executions (Default: 1).\n"); printf(" -u Use pure cuda version for unpack (Default: use thrust version).\n"); printf(" -v Print verbose messages.\n"); printf(" -t Calculate timing.\n"); printf(" -p priority Set scheduling priority to <priority> and cpu affinity - you nedd to be ROOT - (Default: disable).\n"); printf(" -h This help.\n"); } int main(int argc, char* argv[]) { 
int c; char* fileIn = "hbout_w6_100evts"; char* fileOut = "gfout.txt"; char* where = "gpu"; int N_LOOPS = 1; int PRIORITY = 0; int NOTHRUST = 0; while ( (c = getopt(argc, argv, "i:s:o:l:uvtp:h")) != -1 ) { switch(c) { case 'i': fileIn = optarg; break; case 'o': fileOut = optarg; break; case 's': where = optarg; break; case 'l': N_LOOPS = atoi(optarg); break; case 'v': VERBOSE = 1; break; case 'u': NOTHRUST = 1; break; case 't': TIMER = 1; break; case 'p': PRIORITY = atoi(optarg); break; case 'h': help(argv[0]); return 0; } } if (access(fileIn, 0) == -1) { printf("ERROR: File %s doesn't exist.\n", fileIn); return 1; } int semid; if ( PRIORITY ) { // lock control so no one else can run at the same time and crash the machine key_t key = (key_t) 0xdeadface; if ((semid = initsem(key, 1)) == -1) { perror("initsem"); exit(1); } printf("Trying to gain control...\n"); lock(semid); // set scheduling priority & CPU affinity struct sched_param p; p.sched_priority = PRIORITY; if (sched_setscheduler(0, SCHED_FIFO, &p)) { perror("setscheduler"); return -1; } if (sched_getparam(0, &p) == 0) printf("Running with scheduling priority = %d\n", p.sched_priority); unsigned long mask; if (sched_getaffinity(0, sizeof(mask), (cpu_set_t*)&mask) < 0) { perror("sched_getaffinity"); } printf("my affinity mask is: %08lx\n", mask); mask = 1; // processor 1 only if (sched_setaffinity(0, sizeof(mask), (cpu_set_t*)&mask) < 0) { perror("sched_setaffinity"); return -1; } if (sched_getaffinity(0, sizeof(mask), (cpu_set_t*)&mask) < 0) { perror("sched_getaffinity"); } printf("my affinity mask is: %08lx\n", mask); } // Do we want to skip the first "skip" runs from mean calculation? 
int skip = 0; int n_iters = N_LOOPS+skip; float initg = 0; float fcon = 0; float timerange = 0; float ptime[5]; float ptime_cpu[3]; float times_array[6][N_LOOPS]; float times_array_cpu[4][N_LOOPS]; struct timeval time_start, time_stop; struct timeval tBegin, tEnd; struct timeval ptBegin, ptEnd; if ( strcmp(where,"gpu") == 0 ) { // GPU if ( TIMER ) gettimeofday(&tBegin, NULL); // this is just to measure time to initialize GPU cudaEvent_t init; MY_CUDA_CHECK( cudaEventCreate( &init ) ); if ( TIMER ) { gettimeofday(&tEnd, NULL); initg = ((tEnd.tv_usec + 1000000 * tEnd.tv_sec) - (tBegin.tv_usec + 1000000 * tBegin.tv_sec))/1000000.0; } } // read input file FILE* hbout = fopen(fileIn,"r"); if ( hbout == NULL ) { printf("ERROR: Cannot open input file\n"); exit(1); } unsigned int hexaval; unsigned int *data_send = (unsigned int*)malloc(2500000*sizeof(unsigned)); if ( data_send == (unsigned int*) NULL ) { perror("malloc"); return 2; } char word[16]; int k=0; // number of words read if ( VERBOSE ) printf("Reading input file %s... ", fileIn); while (fscanf(hbout, "%s", word) != EOF) { hexaval = strtol(word,NULL,16); data_send[k] = hexaval; k++; } fclose(hbout); int outword; unsigned int *dataout = (unsigned int*)malloc(k*sizeof(unsigned)); tf_arrays_t tf; gf_init(&tf); svtsim_fconread(tf); if ( TIMER ) gettimeofday(&tBegin, NULL); struct extra_data *edata_dev; if ( strcmp(where,"cpu") != 0 ) { // GPU if ( TIMER ) start_time(); MY_CUDA_CHECK(cudaMalloc((void**)&edata_dev, sizeof(struct extra_data))); setedata_GPU(tf, edata_dev); if ( TIMER ) fcon = stop_time("Copy detector configuration data"); } while (n_iters--) { if ( strcmp(where,"cpu") == 0 ) { // CPU if ( TIMER ) gettimeofday(&time_start, NULL); if ( VERBOSE ) printf("Start working on CPU..... 
\n"); if ( TIMER ) gettimeofday(&ptBegin, NULL); gf_fep_unpack(tf, k, data_send); if ( TIMER) { gettimeofday(&ptEnd, NULL); timerange = ((ptEnd.tv_usec + 1000000 * ptEnd.tv_sec) - (ptBegin.tv_usec + 1000000 * ptBegin.tv_sec))/1000.0; if ( VERBOSE ) printf("Time to CPU unpack: %.3f ms\n", timerange); ptime_cpu[0] = timerange; gettimeofday(&ptBegin, NULL); } gf_fep_comb(tf); if ( TIMER) { gettimeofday(&ptEnd, NULL); timerange = ((ptEnd.tv_usec + 1000000 * ptEnd.tv_sec) - (ptBegin.tv_usec + 1000000 * ptBegin.tv_sec))/1000.0; if ( VERBOSE ) printf("Time to CPU comb: %.3f ms\n", timerange); ptime_cpu[1] = timerange; gettimeofday(&ptBegin, NULL); } gf_fit(tf); gf_comparator(tf); if ( TIMER) { gettimeofday(&ptEnd, NULL); timerange = ((ptEnd.tv_usec + 1000000 * ptEnd.tv_sec) - (ptBegin.tv_usec + 1000000 * ptBegin.tv_sec))/1000.0; if ( VERBOSE ) printf("Time to CPU fit: %.3f ms\n", timerange); ptime_cpu[2] = timerange; gettimeofday(&time_stop, NULL); } if ( VERBOSE ) printf(".... fits %d events! 
\n", tf->totEvts); } else { // GPU if ( VERBOSE ) printf("Start working on GPU...\n"); if ( TIMER ) gettimeofday(&time_start, NULL); outword = svt_GPU(tf, edata_dev, data_send, k, ptime, NOTHRUST, dataout); if ( TIMER ) gettimeofday(&time_stop, NULL); } if ( TIMER ) { if ( n_iters < N_LOOPS ) { // skip the first "skip" iterations timerange = ((time_stop.tv_usec + 1000000 * time_stop.tv_sec) - (time_start.tv_usec + 1000000 * time_start.tv_sec))/1000.0; if ( strcmp(where,"cpu") != 0 ) { // GPU times_array[0][n_iters] = timerange; for (int t=1; t < 6; ++t) times_array[t][n_iters] = ptime[t-1]; } else { //CPU times_array_cpu[0][n_iters] = timerange; for (int t=1; t < 4; ++t) times_array_cpu[t][n_iters] = ptime_cpu[t-1]; } } } } // end iterations if ( strcmp(where,"cpu") != 0 ) { MY_CUDA_CHECK(cudaFree(edata_dev)); } if ( TIMER ) { gettimeofday(&tEnd, NULL); timerange = ((tEnd.tv_usec + 1000000 * tEnd.tv_sec) - (tBegin.tv_usec + 1000000 * tBegin.tv_sec))/1000.0; if ( VERBOSE ) printf("Time to complete all: %.3f ms\n", timerange); } // write output file FILE* OUTCHECK = fopen(fileOut, "w"); if ( strcmp(where,"cpu") == 0 ) // CPU for (int i=0; i < tf->out->ndata; i++) fprintf(OUTCHECK,"%.6x\n", tf->out->data[i]); else // GPU for (int i=0; i < outword; i++) fprintf(OUTCHECK,"%.6x\n", dataout[i]); fclose(OUTCHECK); // write file with times if ( TIMER ) { char fileTimes[1024]; FILE *ft; if ( strcmp(where,"cpu") != 0 ) { // GPU float mean[6]; float stdev[6]; for (int t=0; t < 6; ++t) get_mean(times_array[t], N_LOOPS, &mean[t], &stdev[t]); sprintf(fileTimes, "ListTimesGPU-Evts_%d_Loops_%d.txt", NEVTS, N_LOOPS); ft = fopen(fileTimes, "w"); fprintf(ft,"# #NEvts: %d, Loops: %d, mean: %.3f ms, stdev: %.3f ms\n", NEVTS, N_LOOPS, mean[0], stdev[0]); fprintf(ft,"# initialize GPU: %.3f ms; copy detector configuration data: %.3f ms\n", initg, fcon); fprintf(ft,"# input copy and initialize --> mean: %.3f ms, stdev: %.3f ms\n", mean[1], stdev[1]); fprintf(ft,"# input unpack --> mean: 
%.3f ms, stdev: %.3f ms\n", mean[2], stdev[2]); fprintf(ft,"# compute fep combinations --> mean: %.3f ms, stdev: %.3f ms\n", mean[3], stdev[3]); fprintf(ft,"# fit data and set output --> mean: %.3f ms, stdev: %.3f ms\n", mean[4], stdev[4]); fprintf(ft,"# copy output (DtoH) --> mean: %.3f ms, stdev: %.3f ms\n", mean[5], stdev[5]); for (int j=0 ; j < (N_LOOPS); j++) { for (int t=0; t < 6; ++t) fprintf(ft,"%.3f ",times_array[t][j]); fprintf(ft,"\n"); } } else { // CPU float mean[4]; float stdev[4]; for (int t=0; t < 4; ++t) get_mean(times_array_cpu[t], N_LOOPS, &mean[t], &stdev[t]); sprintf(fileTimes, "ListTimesCPU-Evts_%d_Loops_%d.txt", NEVTS, N_LOOPS); ft = fopen(fileTimes, "w"); fprintf(ft,"# #NEvts: %d, Loops: %d, mean: %.3f ms, stdev: %.3f ms\n", NEVTS, N_LOOPS, mean[0], stdev[0]); fprintf(ft,"# input unpack --> mean: %.3f ms, stdev: %.3f ms\n", mean[1], stdev[1]); fprintf(ft,"# compute fep combinations --> mean: %.3f ms, stdev: %.3f ms\n", mean[2], stdev[2]); fprintf(ft,"# fit data and set output --> mean: %.3f ms, stdev: %.3f ms\n", mean[3], stdev[3]); for (int j=0 ; j < (N_LOOPS); j++) { for (int t=0; t < 4; ++t) fprintf(ft,"%.3f ",times_array_cpu[t][j]); fprintf(ft,"\n"); } } fclose(ft); printf("All done. See %s for timing.\n", fileTimes); } if ( PRIORITY ) { if ( VERBOSE ) printf("Unlocking control...\n"); unlock(semid); } free(data_send); free(tf); return 0; }
35813cb04f2a961f26ba554832ede885d5a8705b.hip
// !!! This is a file automatically generated by hipify!!! //---------------------------------------------------------------------------// // Copyright (c) 2015 Jakub Szuppe <j.szuppe@gmail.com> // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // // See http://boostorg.github.com/compute for more information. //---------------------------------------------------------------------------// #include <algorithm> #include <iostream> #include <vector> #include <thrust/find.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "perf.hpp" // Max integer that can be generated by rand_int() function. int rand_int_max = 25; int rand_int() { return static_cast<int>((rand() / double(RAND_MAX)) * rand_int_max); } int main(int argc, char *argv[]) { perf_parse_args(argc, argv); std::cout << "size: " << PERF_N << std::endl; // create vector of random numbers on the host thrust::host_vector<int> host_vector(PERF_N); thrust::generate(host_vector.begin(), host_vector.end(), rand_int); thrust::device_vector<int> v = host_vector; // trying to find element that isn't in vector (worst-case scenario) int wanted = rand_int_max + 1; // result thrust::device_vector<int>::iterator device_result_it; perf_timer t; for(size_t trial = 0; trial < PERF_TRIALS; trial++){ t.start(); device_result_it = thrust::find(v.begin(), v.end(), wanted); hipDeviceSynchronize(); t.stop(); } std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl; // verify if(device_result_it != v.end()){ std::cout << "ERROR: " << "device_result_iterator != " << "v.end()" << std::endl; return -1; } return 0; }
35813cb04f2a961f26ba554832ede885d5a8705b.cu
//---------------------------------------------------------------------------// // Copyright (c) 2015 Jakub Szuppe <j.szuppe@gmail.com> // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // // See http://boostorg.github.com/compute for more information. //---------------------------------------------------------------------------// #include <algorithm> #include <iostream> #include <vector> #include <thrust/find.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "perf.hpp" // Max integer that can be generated by rand_int() function. int rand_int_max = 25; int rand_int() { return static_cast<int>((rand() / double(RAND_MAX)) * rand_int_max); } int main(int argc, char *argv[]) { perf_parse_args(argc, argv); std::cout << "size: " << PERF_N << std::endl; // create vector of random numbers on the host thrust::host_vector<int> host_vector(PERF_N); thrust::generate(host_vector.begin(), host_vector.end(), rand_int); thrust::device_vector<int> v = host_vector; // trying to find element that isn't in vector (worst-case scenario) int wanted = rand_int_max + 1; // result thrust::device_vector<int>::iterator device_result_it; perf_timer t; for(size_t trial = 0; trial < PERF_TRIALS; trial++){ t.start(); device_result_it = thrust::find(v.begin(), v.end(), wanted); cudaDeviceSynchronize(); t.stop(); } std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl; // verify if(device_result_it != v.end()){ std::cout << "ERROR: " << "device_result_iterator != " << "v.end()" << std::endl; return -1; } return 0; }
810299f51f6a1eeac2f87512b43416ca5a8b684d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> typedef enum TARGET {HOST, DEVICE} TARGET; typedef enum MEMTYPE {NORMAL, PINNED} MEMTYPE; typedef struct { int width; int height; float *elements; } Matrix; __global__ void sgemm(Matrix A, Matrix B, Matrix C, const float alpha, const float beta, const int width, const int height) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx = idx_y * width + idx_x; if (idx_x >= width || idx_y >= height) return; float value = 0.f; for (int e = 0; e < width; e++) value = alpha * A.elements[idx_y * width + e] * B.elements[e * width + idx_x]; C.elements[idx] = value + beta * C.elements[idx]; } void InitMatrix(Matrix &mat, const int width, const int height, TARGET target = HOST, MEMTYPE memtype = NORMAL); int main(int argv, char* argc[]) { Matrix A, B, C; Matrix dA, dB, dC; const float alpha = 2.f; const float beta = .5f; const int width = 2048; const int height = 2048; float elapsed_gpu; double elapsed_cpu; // Select Host memory type (NORMAL, PINNED) MEMTYPE memtype = PINNED; // CUDA Event Create to estimate elased time hipEvent_t start, stop; struct timespec begin, finish; hipEventCreate(&start); hipEventCreate(&stop); // Initialize host matrix InitMatrix(A, width, height, HOST, memtype); InitMatrix(B, width, height, HOST, memtype); InitMatrix(C, width, height, HOST, memtype); // CUDA Memory Initialize InitMatrix(dA, width, height, DEVICE); InitMatrix(dB, width, height, DEVICE); InitMatrix(dC, width, height, DEVICE); // CUDA Operation hipEventRecord(start, 0); clock_gettime(CLOCK_MONOTONIC, &begin); // Copy host data to the device (CUDA global memory) // TODO: Write Asynchronous CUDA Memcpy API (gpu -> cpu) ////////////// // Launch GPU Kernel dim3 blockDim(16, 16); dim3 gridDim((width + blockDim.x - 1) / blockDim.x, (height + blockDim.y - 1) / blockDim.y); hipLaunchKernelGGL(( 
sgemm), dim3(gridDim), dim3(blockDim), 0, 0, dA, dB, dC, alpha, beta, width, height); // Copy computation result from the Device the host memory // TODO: Write Asynchronous CUDA Memcpy API (cpu -> gpu) ////////////// clock_gettime(CLOCK_MONOTONIC, &finish); hipEventRecord(stop, 0); // Estimate CUDA operation time hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_gpu, start, stop); printf("SGEMM CUDA Elapsed time: %f ms\n", elapsed_gpu); elapsed_cpu = (finish.tv_sec - begin.tv_sec); elapsed_cpu += (finish.tv_nsec - begin.tv_nsec) / 1000000000.0; printf("Host time: %f ms\n", elapsed_cpu * 1000); // finalize CUDA event hipEventDestroy(start); hipEventDestroy(stop); // Finalize hipFree(dA.elements); hipFree(dB.elements); hipFree(dC.elements); if (memtype == NORMAL) { free(A.elements); free(B.elements); free(C.elements); } else { // TODO: Write pinned memory free API ///////////// } return 0; } void InitMatrix(Matrix &mat, const int width, const int height, TARGET target, MEMTYPE memtype) { mat.width = width; mat.height = height; if (target == DEVICE) { hipMalloc((void**)&mat.elements, width * height * sizeof(float)); } else { if (memtype == NORMAL) mat.elements = (float*)malloc(width * height * sizeof(float)); else // TODO: Write pinned memory allocation API ///////////////////// for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { mat.elements[row * width + col] = row * width + col * 0.001; } } } }
810299f51f6a1eeac2f87512b43416ca5a8b684d.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> typedef enum TARGET {HOST, DEVICE} TARGET; typedef enum MEMTYPE {NORMAL, PINNED} MEMTYPE; typedef struct { int width; int height; float *elements; } Matrix; __global__ void sgemm(Matrix A, Matrix B, Matrix C, const float alpha, const float beta, const int width, const int height) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx = idx_y * width + idx_x; if (idx_x >= width || idx_y >= height) return; float value = 0.f; for (int e = 0; e < width; e++) value = alpha * A.elements[idx_y * width + e] * B.elements[e * width + idx_x]; C.elements[idx] = value + beta * C.elements[idx]; } void InitMatrix(Matrix &mat, const int width, const int height, TARGET target = HOST, MEMTYPE memtype = NORMAL); int main(int argv, char* argc[]) { Matrix A, B, C; Matrix dA, dB, dC; const float alpha = 2.f; const float beta = .5f; const int width = 2048; const int height = 2048; float elapsed_gpu; double elapsed_cpu; // Select Host memory type (NORMAL, PINNED) MEMTYPE memtype = PINNED; // CUDA Event Create to estimate elased time cudaEvent_t start, stop; struct timespec begin, finish; cudaEventCreate(&start); cudaEventCreate(&stop); // Initialize host matrix InitMatrix(A, width, height, HOST, memtype); InitMatrix(B, width, height, HOST, memtype); InitMatrix(C, width, height, HOST, memtype); // CUDA Memory Initialize InitMatrix(dA, width, height, DEVICE); InitMatrix(dB, width, height, DEVICE); InitMatrix(dC, width, height, DEVICE); // CUDA Operation cudaEventRecord(start, 0); clock_gettime(CLOCK_MONOTONIC, &begin); // Copy host data to the device (CUDA global memory) // TODO: Write Asynchronous CUDA Memcpy API (gpu -> cpu) ////////////// // Launch GPU Kernel dim3 blockDim(16, 16); dim3 gridDim((width + blockDim.x - 1) / blockDim.x, (height + blockDim.y - 1) / blockDim.y); sgemm<<<gridDim, blockDim>>>(dA, dB, dC, alpha, beta, width, height); // Copy computation result from 
the Device the host memory // TODO: Write Asynchronous CUDA Memcpy API (cpu -> gpu) ////////////// clock_gettime(CLOCK_MONOTONIC, &finish); cudaEventRecord(stop, 0); // Estimate CUDA operation time cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_gpu, start, stop); printf("SGEMM CUDA Elapsed time: %f ms\n", elapsed_gpu); elapsed_cpu = (finish.tv_sec - begin.tv_sec); elapsed_cpu += (finish.tv_nsec - begin.tv_nsec) / 1000000000.0; printf("Host time: %f ms\n", elapsed_cpu * 1000); // finalize CUDA event cudaEventDestroy(start); cudaEventDestroy(stop); // Finalize cudaFree(dA.elements); cudaFree(dB.elements); cudaFree(dC.elements); if (memtype == NORMAL) { free(A.elements); free(B.elements); free(C.elements); } else { // TODO: Write pinned memory free API ///////////// } return 0; } void InitMatrix(Matrix &mat, const int width, const int height, TARGET target, MEMTYPE memtype) { mat.width = width; mat.height = height; if (target == DEVICE) { cudaMalloc((void**)&mat.elements, width * height * sizeof(float)); } else { if (memtype == NORMAL) mat.elements = (float*)malloc(width * height * sizeof(float)); else // TODO: Write pinned memory allocation API ///////////////////// for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { mat.elements[row * width + col] = row * width + col * 0.001; } } } }
707c7d5aab7dec38d5010a8dd4b7b6503953345a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Device code. 
*/ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P) { __shared__ float Mds[TILED_WIDTH][TILED_WIDTH]; __shared__ float Nds[TILED_WIDTH][TILED_WIDTH]; int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int row = by * TILED_WIDTH + ty; int col = bx * TILED_WIDTH + tx; float p_sum=0.0f; for(int m=0; m<(M.width-1)/TILED_WIDTH+1; m++) { if((m*TILED_WIDTH+tx)<M.width&&row<M.height) Mds[ty][tx] = M.elements[row*M.width+(m*TILED_WIDTH+tx)]; else Mds[ty][tx] = 0.0; if((m*TILED_WIDTH+ty)<N.height&&col<N.width) Nds[ty][tx] = N.elements[(m*TILED_WIDTH+ty)*N.width+col]; else Nds[ty][tx] = 0.0; __syncthreads(); for(int n=0; n<TILED_WIDTH; n++) { p_sum += Mds[ty][n]*Nds[n][tx]; } __syncthreads(); } if(row<P.height&&col<P.width) P.elements[ row*P.width+col] = p_sum; } #endif // #ifndef _MATRIXMUL_KERNEL_H_
707c7d5aab7dec38d5010a8dd4b7b6503953345a.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" //////////////////////////////////////////////////////////////////////////////// //! 
Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P) { __shared__ float Mds[TILED_WIDTH][TILED_WIDTH]; __shared__ float Nds[TILED_WIDTH][TILED_WIDTH]; int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int row = by * TILED_WIDTH + ty; int col = bx * TILED_WIDTH + tx; float p_sum=0.0f; for(int m=0; m<(M.width-1)/TILED_WIDTH+1; m++) { if((m*TILED_WIDTH+tx)<M.width&&row<M.height) Mds[ty][tx] = M.elements[row*M.width+(m*TILED_WIDTH+tx)]; else Mds[ty][tx] = 0.0; if((m*TILED_WIDTH+ty)<N.height&&col<N.width) Nds[ty][tx] = N.elements[(m*TILED_WIDTH+ty)*N.width+col]; else Nds[ty][tx] = 0.0; __syncthreads(); for(int n=0; n<TILED_WIDTH; n++) { p_sum += Mds[ty][n]*Nds[n][tx]; } __syncthreads(); } if(row<P.height&&col<P.width) P.elements[ row*P.width+col] = p_sum; } #endif // #ifndef _MATRIXMUL_KERNEL_H_
caba04463d4098f81a797be879e1c0828eaf2276.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "MergeUtil.h" #include "util.h" /*struct cmp : public binary_function<int, int, bool> { const int *new_indecies; const int *map_to_new; const int *p_1; const int *p_2; const int *c_1; const int *c_2; const int *count_1; const int *count_2; int n_1; cmp(const int *new_indecies, const int *map_to_new, const int *p_1, const int *p_2, const int *c_1, const int *c_2, const int *count_1, const int *count_2, int n_1) : new_indecies(new_indecies), map_to_new(map_to_new), p_1(p_1), p_2(p_2), c_1(c_1), c_2(c_2), count_1(count_1), count_2(count_2), n_1(n_1) {} *//** * * @param i * @param j * @return is node i before(<) node j? *//* __device__ bool operator()(const int i, const int j) const { int c_i = c_1[i]; int c_j = c_2[j]; int p_i = p_1[i]; int p_j = p_2[j]; int count_i = count_1[i]; int count_j = count_2[j]; int new_p_i = new_indecies[map_to_new[p_i]]; int new_p_j = new_indecies[map_to_new[p_j + n_1]]; if (p_i == i && p_j == j) //if both is root return false; if (p_i == i) //only i is root return true; if (p_j == j) //onlyj is root return false; if (new_p_i != new_p_j)//parents are not the same return new_p_i < new_p_j; //they have the same parent if (count_i > -1 && count_j > -1)//both are not s-connection return c_i < c_j;//order by cell_no if (count_i > -1) return true;//only i is not s-connection if (count_j > -1) return false;//only j is not s-connection //both are s-connections return c_i < c_j;//order by cell_no } };*/ __global__ void merge_move(int *cells_1, int *cells_2, int *cells_3, int *parents_1, int *parents_2, int *parents_3, int *counts_1, int *counts_2, int *counts_3, int *new_indecies, int *map_to_new, int *map_to_old, int n_total, int n_1) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n_total) return; int i_per = map_to_old[i]; int i_old = i_per < n_1 ? i_per : i_per - n_1; int i_new = new_indecies[i] - 1; int *cells = i_per < n_1 ? 
cells_1 : cells_2; int *parents = i_per < n_1 ? parents_1 : parents_2; int *counts = i_per < n_1 ? counts_1 : counts_2; cells_3[i_new] = cells[i_old]; if (counts[i_old] >= 0) { atomicAdd(&counts_3[i_new], counts[i_old]); } else { counts_3[i_new] = -1; } parents_3[i_new] = new_indecies[map_to_new[parents[i_old] + (i_per < n_1 ? 0 : n_1)]] - 1;//todo should there actually be minus one? // if (parents_3[i_new] == 74) { // printf("\n\n it is 74 at i:%d, i_new:%d, i_old:%d, i_per:%d, parents[i_old]:%d, map_to_new[..]:%d, new_indecies[..]:%d\n\n", // i, i_new, i_old, i_per, parents[i_old], // map_to_new[parents[i_old] + (i_per < n_1 ? 0 : n_1)], // new_indecies[map_to_new[parents[i_old] + (i_per < n_1 ? 0 : n_1)]]); // } } __global__ void merge_update_dim(int *dim_start_1, int *dims_1, int *dim_start_2, int *dims_2, int *dim_start_3, int *dims_3, int *new_indecies, int *map_to_new, int d, int n_1) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < d) { dims_3[i] = dims_1[i];//could just be copied using hipMemcpy //going back to the node before and check where it is +1 to find where the next level starts int i_1 = dim_start_1[i] - 1; int i_2 = dim_start_2[i] - 1; // maybe only if is included - hmm i think it is okay because we only inc new_indecies if included int i_1_new = new_indecies[map_to_new[i_1]]; int i_2_new = new_indecies[map_to_new[i_2 + n_1]]; dim_start_3[i] = max(i_1_new, i_2_new); } } __global__ void merge_check_path_from_pivots(int start_1, int start_2, int end_1, int end_2, int *map_to_old, int *map_to_new, int *pivots_1, int *pivots_2, int n_1, int n_2, int n_total, int step, cmp c) { //https://web.cs.ucdavis.edu/~amenta/f15/GPUmp.pdf: GPU Merge Path - A GPU Merging Algorithm //also see Merge path - parallel merging made simple. In Parallel and Distributed Processing Symposium, International,may2012. 
// int j = blockIdx.x * blockDim.x + threadIdx.x; // int i = j * step + start_1 + start_2; // if (i >= end_1 + end_2) // return; int j = threadIdx.x; int i = j * step + start_1 + start_2; while(i < end_1 + end_2) { int m_1 = pivots_1[j]; int m_2 = pivots_2[j]; //check for (int s = 0; s < step && i + s < end_1 + end_2; s++) { // printf("test 1\n"); // printf("(m_1 < end_1 && m_2 < end_2), (%d < %d && %d < %d), s:%d\n", m_1, end_1, m_2, end_2, s); bool on = (m_1 < end_1 && m_2 < end_2) ? c(m_1, m_2) : (m_2 == end_2); //todo i think there is a problem here!!! what if (m_2 == end_2) && (m_1 == end_1) // printf("test 2\n"); // if (m_2 == 163) { // printf("(m_1 < end_1 && m_2 < end_2), (%d < %d && %d < %d), s:%d\n", m_1, end_1, m_2, end_2, s); // if (on) { // printf("on\n"); // } else { // printf("off\n"); // } // } if (on) { // if (map_to_new[m_1] >= 0) { // printf("on pivots_1[j]:%d, pivots_2[j]:%d, (start_1 <= m_1 < end_1 && start_2 <= m_2 < end_2), (%d <= %d < %d && %d <= %d < %d), s:%d, i+s:%d, map_to_new[m_1]:%d\n", // pivots_1[j], pivots_2[j], start_1, m_1, end_1, start_2, m_2, end_2, s, i + s, map_to_new[m_1]); // } map_to_old[i + s] = m_1; map_to_new[m_1] = i + s; m_1++; } else { // if (map_to_new[m_2 + n_1] >= 0) { // printf("off pivots_1[j]:%d, pivots_2[j]:%d, (start_1 <= m_1 < end_1 && start_2 <= m_2 < end_2), (%d <= %d < %d && %d <= %d < %d), s:%d, i+s:%d, map_to_new[m_2]:%d\n", // pivots_1[j], pivots_2[j], start_1, m_1, end_1, start_2, m_2, end_2, s, i + s, map_to_new[m_2 + n_1]); // } map_to_old[i + s] = m_2 + n_1; map_to_new[m_2 + n_1] = i + s; m_2++; } } j += blockDim.x; i = j * step + start_1 + start_2; } } __global__ void compute_is_included_from_path(int start_1, int start_2, int *is_included, int *map_to_old, int *d_parents_1, int *d_parents_2, int *d_cells_1, int *d_cells_2, int *d_counts_1, int *d_counts_2, int n_1, int n_total) { int k = blockIdx.x * blockDim.x + threadIdx.x + start_1 + start_2; if (k == 0) { is_included[k] = 1; } else if (k < 
n_total) { int j = map_to_old[k - 1]; int i = map_to_old[k]; const int *d_parents_l = i < n_1 ? d_parents_1 : d_parents_2; const int *d_parents_r = j < n_1 ? d_parents_1 : d_parents_2; const int *d_cells_l = i < n_1 ? d_cells_1 : d_cells_2; const int *d_cells_r = j < n_1 ? d_cells_1 : d_cells_2; const int *d_counts_l = i < n_1 ? d_counts_1 : d_counts_2; const int *d_counts_r = j < n_1 ? d_counts_1 : d_counts_2; int parent_i = d_parents_l[i < n_1 ? i : i - n_1]; int parent_j = d_parents_r[j < n_1 ? j : j - n_1]; int cell_i = d_cells_l[i < n_1 ? i : i - n_1]; int cell_j = d_cells_r[j < n_1 ? j : j - n_1]; int count_i = d_counts_l[i < n_1 ? i : i - n_1]; int count_j = d_counts_r[j < n_1 ? j : j - n_1]; is_included[k] = 0; while (true) { if ((count_i >= 0 && count_j < 0) || (count_i < 0 && count_j >= 0)) { is_included[k] = 1; break; } if (cell_i != cell_j) { is_included[k] = 1; break; } if (parent_i == 0 && parent_j == 0) { break; } if (parent_j == 0 || parent_i == 0) { is_included[k] = 1; break; } cell_i = d_cells_l[parent_i]; cell_j = d_cells_r[parent_j]; //todo new start count_i = d_counts_l[parent_i]; count_j = d_counts_r[parent_j]; //todo new end parent_i = d_parents_l[parent_i]; parent_j = d_parents_r[parent_j]; } } } __global__ void clone(int *to, int *from, int size) { for (int i = threadIdx.x; i < size; i += blockDim.x) { to[i] = from[i]; } } __global__ void merge_search_for_pivots(int start_1, int start_2, int end_1, int end_2, int *pivots_1, int *pivots_2, int number_of_nodes_1, int number_of_nodes_2, int number_of_nodes_total, int step, cmp c) { //this is very close to the code from: //https://web.cs.ucdavis.edu/~amenta/f15/GPUmp.pdf: GPU Merge Path - A GPU Merging Algorithm //also see Merge path - parallel merging made simple. In Parallel and Distributed Processing Symposium, International,may2012. 
int j = blockIdx.x * blockDim.x + threadIdx.x; int i = j * step; int length_1 = end_1 - start_1; int length_2 = end_2 - start_2; if (i >= length_1 + length_2) return; //binary search int r_1 = min(end_1, start_1 + i); int r_2 = start_2 + max(0, i - (length_1)); int l_1 = start_1 + max(0, i - (length_2)); int l_2 = min(end_2, start_2 + i); int m_1 = 0; int m_2 = 0; // printf("test 0, %d\n", j); while (true) {//L <= R: int offset = (r_1 - l_1) / 2; m_1 = r_1 - offset; m_2 = r_2 + offset; if (!(start_1 <= m_1 && m_1 <= end_1 && start_2 <= m_2 && m_2 <= end_2)) { printf("i:%d, j:%d, m_1:%d, m_2:%d\n", i, j, m_1, m_2); } // printf("test 1, %d\n", j); // bool not_above = (m_2 == 0 || m_1 == end_1 || !c(m_1, m_2 - 1)); // bool left_off = (m_1 == 0 || m_2 == end_2 || c(m_1 - 1, m_2)); bool not_above = (m_2 <= start_2 || m_1 >= end_1 || !c(m_1, m_2 - 1)); bool left_off = (m_1 <= start_1 || m_2 >= end_2 || c(m_1 - 1, m_2)); // printf("test 2, %d\n", j); if (not_above) { if (left_off) { break; } else { r_1 = m_1 - 1; r_2 = m_2 + 1; } } else { l_1 = m_1 + 1; l_2 = m_2 - 1; } } pivots_1[j] = m_1; pivots_2[j] = m_2; // printf("test 3, %d\n", j); } __global__ void points_move(int *d_points_1, int *d_points_placement_1, int number_of_points_1, int number_of_nodes_1, int *d_points_2, int *d_points_placement_2, int number_of_points_2, int *d_points_3, int *d_points_placement_3, int number_of_points_3, int *new_indecies, int *map_to_new) { int i = blockIdx.x * blockDim.x + threadIdx.x; int *d_points, *d_points_placement; int points_offset; int nodes_offset; if (i >= number_of_points_3) return; if (i < number_of_points_1) { points_offset = 0; nodes_offset = 0; d_points = d_points_1; d_points_placement = d_points_placement_1; } else { points_offset = number_of_points_1; nodes_offset = number_of_nodes_1; d_points = d_points_2; d_points_placement = d_points_placement_2; } d_points_3[i] = d_points[i - points_offset];//could just be copied using hipMemcpy d_points_placement_3[i] = 
new_indecies[map_to_new[d_points_placement[i - points_offset] + nodes_offset]] - 1; }
caba04463d4098f81a797be879e1c0828eaf2276.cu
#include "MergeUtil.h" #include "util.h" /*struct cmp : public binary_function<int, int, bool> { const int *new_indecies; const int *map_to_new; const int *p_1; const int *p_2; const int *c_1; const int *c_2; const int *count_1; const int *count_2; int n_1; cmp(const int *new_indecies, const int *map_to_new, const int *p_1, const int *p_2, const int *c_1, const int *c_2, const int *count_1, const int *count_2, int n_1) : new_indecies(new_indecies), map_to_new(map_to_new), p_1(p_1), p_2(p_2), c_1(c_1), c_2(c_2), count_1(count_1), count_2(count_2), n_1(n_1) {} *//** * * @param i * @param j * @return is node i before(<) node j? *//* __device__ bool operator()(const int i, const int j) const { int c_i = c_1[i]; int c_j = c_2[j]; int p_i = p_1[i]; int p_j = p_2[j]; int count_i = count_1[i]; int count_j = count_2[j]; int new_p_i = new_indecies[map_to_new[p_i]]; int new_p_j = new_indecies[map_to_new[p_j + n_1]]; if (p_i == i && p_j == j) //if both is root return false; if (p_i == i) //only i is root return true; if (p_j == j) //only´j is root return false; if (new_p_i != new_p_j)//parents are not the same return new_p_i < new_p_j; //they have the same parent if (count_i > -1 && count_j > -1)//both are not s-connection return c_i < c_j;//order by cell_no if (count_i > -1) return true;//only i is not s-connection if (count_j > -1) return false;//only j is not s-connection //both are s-connections return c_i < c_j;//order by cell_no } };*/ __global__ void merge_move(int *cells_1, int *cells_2, int *cells_3, int *parents_1, int *parents_2, int *parents_3, int *counts_1, int *counts_2, int *counts_3, int *new_indecies, int *map_to_new, int *map_to_old, int n_total, int n_1) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n_total) return; int i_per = map_to_old[i]; int i_old = i_per < n_1 ? i_per : i_per - n_1; int i_new = new_indecies[i] - 1; int *cells = i_per < n_1 ? cells_1 : cells_2; int *parents = i_per < n_1 ? parents_1 : parents_2; int *counts = i_per < n_1 ? 
counts_1 : counts_2; cells_3[i_new] = cells[i_old]; if (counts[i_old] >= 0) { atomicAdd(&counts_3[i_new], counts[i_old]); } else { counts_3[i_new] = -1; } parents_3[i_new] = new_indecies[map_to_new[parents[i_old] + (i_per < n_1 ? 0 : n_1)]] - 1;//todo should there actually be minus one? // if (parents_3[i_new] == 74) { // printf("\n\n it is 74 at i:%d, i_new:%d, i_old:%d, i_per:%d, parents[i_old]:%d, map_to_new[..]:%d, new_indecies[..]:%d\n\n", // i, i_new, i_old, i_per, parents[i_old], // map_to_new[parents[i_old] + (i_per < n_1 ? 0 : n_1)], // new_indecies[map_to_new[parents[i_old] + (i_per < n_1 ? 0 : n_1)]]); // } } __global__ void merge_update_dim(int *dim_start_1, int *dims_1, int *dim_start_2, int *dims_2, int *dim_start_3, int *dims_3, int *new_indecies, int *map_to_new, int d, int n_1) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < d) { dims_3[i] = dims_1[i];//could just be copied using cudaMemcpy //going back to the node before and check where it is +1 to find where the next level starts int i_1 = dim_start_1[i] - 1; int i_2 = dim_start_2[i] - 1; // maybe only if is included - hmm i think it is okay because we only inc new_indecies if included int i_1_new = new_indecies[map_to_new[i_1]]; int i_2_new = new_indecies[map_to_new[i_2 + n_1]]; dim_start_3[i] = max(i_1_new, i_2_new); } } __global__ void merge_check_path_from_pivots(int start_1, int start_2, int end_1, int end_2, int *map_to_old, int *map_to_new, int *pivots_1, int *pivots_2, int n_1, int n_2, int n_total, int step, cmp c) { //https://web.cs.ucdavis.edu/~amenta/f15/GPUmp.pdf: GPU Merge Path - A GPU Merging Algorithm //also see Merge path - parallel merging made simple. In Parallel and Distributed Processing Symposium, International,may2012. 
// int j = blockIdx.x * blockDim.x + threadIdx.x; // int i = j * step + start_1 + start_2; // if (i >= end_1 + end_2) // return; int j = threadIdx.x; int i = j * step + start_1 + start_2; while(i < end_1 + end_2) { int m_1 = pivots_1[j]; int m_2 = pivots_2[j]; //check for (int s = 0; s < step && i + s < end_1 + end_2; s++) { // printf("test 1\n"); // printf("(m_1 < end_1 && m_2 < end_2), (%d < %d && %d < %d), s:%d\n", m_1, end_1, m_2, end_2, s); bool on = (m_1 < end_1 && m_2 < end_2) ? c(m_1, m_2) : (m_2 == end_2); //todo i think there is a problem here!!! what if (m_2 == end_2) && (m_1 == end_1) // printf("test 2\n"); // if (m_2 == 163) { // printf("(m_1 < end_1 && m_2 < end_2), (%d < %d && %d < %d), s:%d\n", m_1, end_1, m_2, end_2, s); // if (on) { // printf("on\n"); // } else { // printf("off\n"); // } // } if (on) { // if (map_to_new[m_1] >= 0) { // printf("on pivots_1[j]:%d, pivots_2[j]:%d, (start_1 <= m_1 < end_1 && start_2 <= m_2 < end_2), (%d <= %d < %d && %d <= %d < %d), s:%d, i+s:%d, map_to_new[m_1]:%d\n", // pivots_1[j], pivots_2[j], start_1, m_1, end_1, start_2, m_2, end_2, s, i + s, map_to_new[m_1]); // } map_to_old[i + s] = m_1; map_to_new[m_1] = i + s; m_1++; } else { // if (map_to_new[m_2 + n_1] >= 0) { // printf("off pivots_1[j]:%d, pivots_2[j]:%d, (start_1 <= m_1 < end_1 && start_2 <= m_2 < end_2), (%d <= %d < %d && %d <= %d < %d), s:%d, i+s:%d, map_to_new[m_2]:%d\n", // pivots_1[j], pivots_2[j], start_1, m_1, end_1, start_2, m_2, end_2, s, i + s, map_to_new[m_2 + n_1]); // } map_to_old[i + s] = m_2 + n_1; map_to_new[m_2 + n_1] = i + s; m_2++; } } j += blockDim.x; i = j * step + start_1 + start_2; } } __global__ void compute_is_included_from_path(int start_1, int start_2, int *is_included, int *map_to_old, int *d_parents_1, int *d_parents_2, int *d_cells_1, int *d_cells_2, int *d_counts_1, int *d_counts_2, int n_1, int n_total) { int k = blockIdx.x * blockDim.x + threadIdx.x + start_1 + start_2; if (k == 0) { is_included[k] = 1; } else if (k < 
n_total) { int j = map_to_old[k - 1]; int i = map_to_old[k]; const int *d_parents_l = i < n_1 ? d_parents_1 : d_parents_2; const int *d_parents_r = j < n_1 ? d_parents_1 : d_parents_2; const int *d_cells_l = i < n_1 ? d_cells_1 : d_cells_2; const int *d_cells_r = j < n_1 ? d_cells_1 : d_cells_2; const int *d_counts_l = i < n_1 ? d_counts_1 : d_counts_2; const int *d_counts_r = j < n_1 ? d_counts_1 : d_counts_2; int parent_i = d_parents_l[i < n_1 ? i : i - n_1]; int parent_j = d_parents_r[j < n_1 ? j : j - n_1]; int cell_i = d_cells_l[i < n_1 ? i : i - n_1]; int cell_j = d_cells_r[j < n_1 ? j : j - n_1]; int count_i = d_counts_l[i < n_1 ? i : i - n_1]; int count_j = d_counts_r[j < n_1 ? j : j - n_1]; is_included[k] = 0; while (true) { if ((count_i >= 0 && count_j < 0) || (count_i < 0 && count_j >= 0)) { is_included[k] = 1; break; } if (cell_i != cell_j) { is_included[k] = 1; break; } if (parent_i == 0 && parent_j == 0) { break; } if (parent_j == 0 || parent_i == 0) { is_included[k] = 1; break; } cell_i = d_cells_l[parent_i]; cell_j = d_cells_r[parent_j]; //todo new start count_i = d_counts_l[parent_i]; count_j = d_counts_r[parent_j]; //todo new end parent_i = d_parents_l[parent_i]; parent_j = d_parents_r[parent_j]; } } } __global__ void clone(int *to, int *from, int size) { for (int i = threadIdx.x; i < size; i += blockDim.x) { to[i] = from[i]; } } __global__ void merge_search_for_pivots(int start_1, int start_2, int end_1, int end_2, int *pivots_1, int *pivots_2, int number_of_nodes_1, int number_of_nodes_2, int number_of_nodes_total, int step, cmp c) { //this is very close to the code from: //https://web.cs.ucdavis.edu/~amenta/f15/GPUmp.pdf: GPU Merge Path - A GPU Merging Algorithm //also see Merge path - parallel merging made simple. In Parallel and Distributed Processing Symposium, International,may2012. 
int j = blockIdx.x * blockDim.x + threadIdx.x; int i = j * step; int length_1 = end_1 - start_1; int length_2 = end_2 - start_2; if (i >= length_1 + length_2) return; //binary search int r_1 = min(end_1, start_1 + i); int r_2 = start_2 + max(0, i - (length_1)); int l_1 = start_1 + max(0, i - (length_2)); int l_2 = min(end_2, start_2 + i); int m_1 = 0; int m_2 = 0; // printf("test 0, %d\n", j); while (true) {//L <= R: int offset = (r_1 - l_1) / 2; m_1 = r_1 - offset; m_2 = r_2 + offset; if (!(start_1 <= m_1 && m_1 <= end_1 && start_2 <= m_2 && m_2 <= end_2)) { printf("i:%d, j:%d, m_1:%d, m_2:%d\n", i, j, m_1, m_2); } // printf("test 1, %d\n", j); // bool not_above = (m_2 == 0 || m_1 == end_1 || !c(m_1, m_2 - 1)); // bool left_off = (m_1 == 0 || m_2 == end_2 || c(m_1 - 1, m_2)); bool not_above = (m_2 <= start_2 || m_1 >= end_1 || !c(m_1, m_2 - 1)); bool left_off = (m_1 <= start_1 || m_2 >= end_2 || c(m_1 - 1, m_2)); // printf("test 2, %d\n", j); if (not_above) { if (left_off) { break; } else { r_1 = m_1 - 1; r_2 = m_2 + 1; } } else { l_1 = m_1 + 1; l_2 = m_2 - 1; } } pivots_1[j] = m_1; pivots_2[j] = m_2; // printf("test 3, %d\n", j); } __global__ void points_move(int *d_points_1, int *d_points_placement_1, int number_of_points_1, int number_of_nodes_1, int *d_points_2, int *d_points_placement_2, int number_of_points_2, int *d_points_3, int *d_points_placement_3, int number_of_points_3, int *new_indecies, int *map_to_new) { int i = blockIdx.x * blockDim.x + threadIdx.x; int *d_points, *d_points_placement; int points_offset; int nodes_offset; if (i >= number_of_points_3) return; if (i < number_of_points_1) { points_offset = 0; nodes_offset = 0; d_points = d_points_1; d_points_placement = d_points_placement_1; } else { points_offset = number_of_points_1; nodes_offset = number_of_nodes_1; d_points = d_points_2; d_points_placement = d_points_placement_2; } d_points_3[i] = d_points[i - points_offset];//could just be copied using cudaMemcpy d_points_placement_3[i] = 
new_indecies[map_to_new[d_points_placement[i - points_offset] + nodes_offset]] - 1; }
004e7eb62d7633c437d1d051acc056a5d5139fbe.hip
// !!! This is a file automatically generated by hipify!!! /* hw5_blas.cu: Apply the power method to find the largest eigenvalue / eigenvector pair for a matrix using the cuBLAS library on a GPU. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #include <assert.h> #include <hip/hip_runtime.h> #include "rocblas.h" //-------------------------------------------------------------------------- // Fortran column major indexing. We don't actually need the number // of columns (n), but we'll include it as an argument anyway for when // we do C-style (row major) indexing. #define IDX2C(i,j,m,n) (((j)*(m))+(i)) //-------------------------------------------------------------------------- // Just in case there is a cuBLAS error. const char *cublasGetErrorString(hipblasStatus_t status) { switch(status) { case HIPBLAS_STATUS_SUCCESS: return "HIPBLAS_STATUS_SUCCESS"; case HIPBLAS_STATUS_NOT_INITIALIZED: return "HIPBLAS_STATUS_NOT_INITIALIZED"; case HIPBLAS_STATUS_ALLOC_FAILED: return "HIPBLAS_STATUS_ALLOC_FAILED"; case HIPBLAS_STATUS_INVALID_VALUE: return "HIPBLAS_STATUS_INVALID_VALUE"; case HIPBLAS_STATUS_ARCH_MISMATCH: return "HIPBLAS_STATUS_ARCH_MISMATCH"; case HIPBLAS_STATUS_MAPPING_ERROR: return "HIPBLAS_STATUS_MAPPING_ERROR"; case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED"; case HIPBLAS_STATUS_INTERNAL_ERROR: return "HIPBLAS_STATUS_INTERNAL_ERROR"; case HIPBLAS_STATUS_NOT_SUPPORTED: return "HIPBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; default: return "UNKNOWN!!!"; } } //-------------------------------------------------------------------------- // Mark's code to handle CUDA errors. 
#define cudaTry(cudaStatus) _cudaTry(cudaStatus, __FILE__, __LINE__) void _cudaTry(hipError_t cudaStatus, const char *fileName, int lineNumber) { if(cudaStatus != hipSuccess) { fprintf(stderr, "%s in %s line %d\n", hipGetErrorString(cudaStatus), fileName, lineNumber); exit(1); } } // Ian's code to handle cuBLAS errors. #define cublasTry(cublasStatus) _cublasTry(cublasStatus, __FILE__, __LINE__) void _cublasTry(hipblasStatus_t cublasStatus, const char *fileName, int lineNumber) { if (cublasStatus != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "%s in %s line %d\n", cublasGetErrorString(cublasStatus), fileName, lineNumber); exit(EXIT_FAILURE); } } //-------------------------------------------------------------------------- // Load data array from a file. File is assumed to be ASCII and // contain rows * columns floats (in column major order). Each entry // is separated from the next by a newline. It is assumed that input // array data has already been allocated sufficient memory. void load_data(const char *filename, const uint rows, const uint cols, float *data) { FILE *fh = fopen(filename, "r"); if(fh == NULL) { fprintf(stderr, "Unable to open file %s for input", filename); exit(1); } // Load the data. for(uint i = 0; i < rows; i++) for(uint j = 0; j < cols; j++) { if(fscanf(fh, "%e", &data[IDX2C(i, j, rows, cols)]) != 1) { fprintf(stderr, "Unable to read element (%d,%d) of file %s", i, j, filename); exit(1); } } fclose(fh); } //-------------------------------------------------------------------------- // Save data array into a file. File is written in ASCII format and // contains rows * cols of floats (in column major order). Each entry // is separated from the next by a newline. void save_data(const char *filename, const uint rows, const uint cols, float *data) { FILE *fh = fopen(filename, "w"); if(fh == NULL) { fprintf(stderr, "Unable to open file %s for output", filename); exit(1); } // Save the data. 
for(uint i = 0; i < rows; i++) for(uint j = 0; j < cols; j++) if(fprintf(fh, "%e\n", data[IDX2C(i, j, rows, cols)]) == 0) { fprintf(stderr, "Unable to write element (%d,%d) of file %s", i, j, filename); exit(1); } fclose(fh); } //-------------------------------------------------------------------------- // Run the power iteration on matrix A of size n x n, starting with // vector b0 for k iterations. Assuming that convergence has been // reached, return the magnitude of the largest eignenvalue (a scalar) // in lambda_mag and the corresponding eigenvector in bk. Caller // should allocate space for a, b0, lambda_mag and bk. void power_iteration_cpu(const uint n, const float *a, const float *b0, const uint k, float *lambda_mag, float *bk) { hipblasHandle_t handle; float lambda, lambda_inv; // Constants for the BLAS calls. float alpha = 1.0; float beta = 0.0; // Set up cuBLAS. cublasTry(hipblasCreate(&handle)); // Allocate space on the GPU. float *dev_a, *dev_bk; cudaTry(hipMalloc((void **)(&(dev_a)), n * n * sizeof(float))); cudaTry(hipMalloc((void **)(&(dev_bk)), n * sizeof(float))); // Copy over the input data. cublasTry(hipblasSetMatrix(n, n, sizeof(*a), a, n, dev_a, n)); cublasTry(hipblasSetVector(n, sizeof(*b0), b0, 1, dev_bk, 1)); // Remaining iterations can use bk as the input vector. for(uint i = 0; i < k; i++) { // bk <- A * bk. cublasTry(hipblasSgemv(handle, HIPBLAS_OP_N, n, n, &alpha, dev_a, n, dev_bk, 1, &beta, dev_bk, 1)); // lambda <- ||bk||. cublasTry(hipblasSnrm2(handle, n, dev_bk, 1, &lambda)); // bk <- bk / lambda. lambda_inv = 1.0f / lambda; cublasTry(hipblasSscal(handle, n, &lambda_inv, dev_bk, 1)); } // Get the final value of bk as the approximation of the eigenvector. cublasTry(hipblasGetVector(n, sizeof(*dev_bk), dev_bk, 1, bk, 1)); // Return the estimate of the magnitude of the largest eigenvalue. // The estimated eigenvector is already stored in bk. *lambda_mag = lambda; // Free the device memory. 
cudaTry(hipFree((void *)(dev_a))); cudaTry(hipFree((void *)(dev_bk))); } int main(int argc, char **argv) { if((argc < 5) || (argc > 6)) { fprintf(stderr, "\nusage: hw5_blas <matrix_size> <filename_A> %s\n", "<filename_b0> <iterations> [filename_bk]"); exit(1); } // Figure out the problem. int n = atoi(argv[1]); char *file_A = argv[2]; char *file_b0 = argv[3]; int k = atoi(argv[4]); printf("\nPower iteration parameters:\n n = %d", n); printf("\n filename_A = %s\n filename_b0 = %s\n iterations = %d\n", file_A, file_b0, k); // Allocate some memory. float *a = (float *)malloc(n * n * sizeof(float)); assert(a != NULL); float *b0 = (float *)malloc(n * sizeof(float)); assert(b0 != NULL); float *bk = (float *)malloc(n * sizeof(float)); assert(bk != NULL); float lambda_mag; // Load the input data. load_data(file_A, n, n, a); load_data(file_b0, n, 1, b0); // Run the power iteration. power_iteration_cpu(n, a, b0, k, &lambda_mag, bk); // If necessary, save the eigenvector in the output file. if(argc == 6) { char *file_bk = argv[6]; save_data(file_bk, n, 1, bk); } // Print the estimated eigenvector magnitude. printf("\nAfter %d iterations, magnitude of largest eigenvalue: %5.3f\n", k, lambda_mag); // Free up all the memory. free(a); free(b0); free(bk); exit(0); }
004e7eb62d7633c437d1d051acc056a5d5139fbe.cu
/* hw5_blas.cu: Apply the power method to find the largest eigenvalue / eigenvector pair for a matrix using the cuBLAS library on a GPU. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #include <assert.h> #include <cuda_runtime.h> #include "cublas_v2.h" //-------------------------------------------------------------------------- // Fortran column major indexing. We don't actually need the number // of columns (n), but we'll include it as an argument anyway for when // we do C-style (row major) indexing. #define IDX2C(i,j,m,n) (((j)*(m))+(i)) //-------------------------------------------------------------------------- // Just in case there is a cuBLAS error. const char *cublasGetErrorString(cublasStatus_t status) { switch(status) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; default: return "UNKNOWN!!!"; } } //-------------------------------------------------------------------------- // Mark's code to handle CUDA errors. #define cudaTry(cudaStatus) _cudaTry(cudaStatus, __FILE__, __LINE__) void _cudaTry(cudaError_t cudaStatus, const char *fileName, int lineNumber) { if(cudaStatus != cudaSuccess) { fprintf(stderr, "%s in %s line %d\n", cudaGetErrorString(cudaStatus), fileName, lineNumber); exit(1); } } // Ian's code to handle cuBLAS errors. 
#define cublasTry(cublasStatus) _cublasTry(cublasStatus, __FILE__, __LINE__) void _cublasTry(cublasStatus_t cublasStatus, const char *fileName, int lineNumber) { if (cublasStatus != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "%s in %s line %d\n", cublasGetErrorString(cublasStatus), fileName, lineNumber); exit(EXIT_FAILURE); } } //-------------------------------------------------------------------------- // Load data array from a file. File is assumed to be ASCII and // contain rows * columns floats (in column major order). Each entry // is separated from the next by a newline. It is assumed that input // array data has already been allocated sufficient memory. void load_data(const char *filename, const uint rows, const uint cols, float *data) { FILE *fh = fopen(filename, "r"); if(fh == NULL) { fprintf(stderr, "Unable to open file %s for input", filename); exit(1); } // Load the data. for(uint i = 0; i < rows; i++) for(uint j = 0; j < cols; j++) { if(fscanf(fh, "%e", &data[IDX2C(i, j, rows, cols)]) != 1) { fprintf(stderr, "Unable to read element (%d,%d) of file %s", i, j, filename); exit(1); } } fclose(fh); } //-------------------------------------------------------------------------- // Save data array into a file. File is written in ASCII format and // contains rows * cols of floats (in column major order). Each entry // is separated from the next by a newline. void save_data(const char *filename, const uint rows, const uint cols, float *data) { FILE *fh = fopen(filename, "w"); if(fh == NULL) { fprintf(stderr, "Unable to open file %s for output", filename); exit(1); } // Save the data. 
for(uint i = 0; i < rows; i++) for(uint j = 0; j < cols; j++) if(fprintf(fh, "%e\n", data[IDX2C(i, j, rows, cols)]) == 0) { fprintf(stderr, "Unable to write element (%d,%d) of file %s", i, j, filename); exit(1); } fclose(fh); } //-------------------------------------------------------------------------- // Run the power iteration on matrix A of size n x n, starting with // vector b0 for k iterations. Assuming that convergence has been // reached, return the magnitude of the largest eignenvalue (a scalar) // in lambda_mag and the corresponding eigenvector in bk. Caller // should allocate space for a, b0, lambda_mag and bk. void power_iteration_cpu(const uint n, const float *a, const float *b0, const uint k, float *lambda_mag, float *bk) { cublasHandle_t handle; float lambda, lambda_inv; // Constants for the BLAS calls. float alpha = 1.0; float beta = 0.0; // Set up cuBLAS. cublasTry(cublasCreate(&handle)); // Allocate space on the GPU. float *dev_a, *dev_bk; cudaTry(cudaMalloc((void **)(&(dev_a)), n * n * sizeof(float))); cudaTry(cudaMalloc((void **)(&(dev_bk)), n * sizeof(float))); // Copy over the input data. cublasTry(cublasSetMatrix(n, n, sizeof(*a), a, n, dev_a, n)); cublasTry(cublasSetVector(n, sizeof(*b0), b0, 1, dev_bk, 1)); // Remaining iterations can use bk as the input vector. for(uint i = 0; i < k; i++) { // bk <- A * bk. cublasTry(cublasSgemv(handle, CUBLAS_OP_N, n, n, &alpha, dev_a, n, dev_bk, 1, &beta, dev_bk, 1)); // lambda <- ||bk||. cublasTry(cublasSnrm2(handle, n, dev_bk, 1, &lambda)); // bk <- bk / lambda. lambda_inv = 1.0f / lambda; cublasTry(cublasSscal(handle, n, &lambda_inv, dev_bk, 1)); } // Get the final value of bk as the approximation of the eigenvector. cublasTry(cublasGetVector(n, sizeof(*dev_bk), dev_bk, 1, bk, 1)); // Return the estimate of the magnitude of the largest eigenvalue. // The estimated eigenvector is already stored in bk. *lambda_mag = lambda; // Free the device memory. 
cudaTry(cudaFree((void *)(dev_a))); cudaTry(cudaFree((void *)(dev_bk))); } int main(int argc, char **argv) { if((argc < 5) || (argc > 6)) { fprintf(stderr, "\nusage: hw5_blas <matrix_size> <filename_A> %s\n", "<filename_b0> <iterations> [filename_bk]"); exit(1); } // Figure out the problem. int n = atoi(argv[1]); char *file_A = argv[2]; char *file_b0 = argv[3]; int k = atoi(argv[4]); printf("\nPower iteration parameters:\n n = %d", n); printf("\n filename_A = %s\n filename_b0 = %s\n iterations = %d\n", file_A, file_b0, k); // Allocate some memory. float *a = (float *)malloc(n * n * sizeof(float)); assert(a != NULL); float *b0 = (float *)malloc(n * sizeof(float)); assert(b0 != NULL); float *bk = (float *)malloc(n * sizeof(float)); assert(bk != NULL); float lambda_mag; // Load the input data. load_data(file_A, n, n, a); load_data(file_b0, n, 1, b0); // Run the power iteration. power_iteration_cpu(n, a, b0, k, &lambda_mag, bk); // If necessary, save the eigenvector in the output file. if(argc == 6) { char *file_bk = argv[6]; save_data(file_bk, n, 1, bk); } // Print the estimated eigenvector magnitude. printf("\nAfter %d iterations, magnitude of largest eigenvalue: %5.3f\n", k, lambda_mag); // Free up all the memory. free(a); free(b0); free(bk); exit(0); }
05a5c906f864df496749f505b44b50b7757e70c3.hip
// !!! This is a file automatically generated by hipify!!! /* * triplet_loss_layer.cu * */ #include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/triplet_loss_layer.hpp" namespace caffe { template <typename Dtype> void TripletLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count() / 3; const Dtype * anchor_data = bottom[0]->gpu_data(); const Dtype * pos_data = bottom[0]->gpu_data() + count; const Dtype * neg_data = bottom[0]->gpu_data() + 2 * count; const Dtype *label = bottom[1]->cpu_data(); for (int i = 0; i < bottom[1]->num() / 3; i++) { CHECK_EQ(label[i], label[i + bottom[1]->num() / 3]); CHECK_NE(label[i], label[i + 2 * bottom[1]->num() / 3]); } //const Dtype* sampleW = bottom[3]->cpu_data(); //const Dtype sampleW = Dtype(1); caffe_gpu_sub( count, anchor_data, // a pos_data, // p diff_ap_.mutable_gpu_data()); // a_i-p_i caffe_gpu_sub( count, anchor_data, // a neg_data, // n diff_an_.mutable_gpu_data()); // a_i-n_i caffe_gpu_sub( count, pos_data, // p neg_data, // n diff_pn_.mutable_gpu_data()); // p_i-n_i const int channels = bottom[0]->channels(); caffe_gpu_powx( count, diff_ap_.mutable_gpu_data(), // a_i-p_i Dtype(2), diff_sq_ap_.mutable_gpu_data()); // (a_i-p_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num() / 3, bottom[0]->channels(), Dtype(1.0), //alpha diff_sq_ap_.gpu_data(), // (a_i-p_i)^2 // A summer_vec_.gpu_data(), // x Dtype(0.0), //belta dist_sq_ap_.mutable_gpu_data()); // \Sum (a_i-p_i)^2 //y caffe_gpu_powx( count, diff_an_.mutable_gpu_data(), // a_i-n_i Dtype(2), diff_sq_an_.mutable_gpu_data()); // (a_i-n_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num() / 3, bottom[0]->channels(), Dtype(1.0), //alpha diff_sq_an_.gpu_data(), // (a_i-n_i)^2 // A summer_vec_.gpu_data(), // x Dtype(0.0), //belta dist_sq_an_.mutable_gpu_data()); // \Sum (a_i-n_i)^2 //y Dtype margin = 
this->layer_param_.triplet_loss_param().margin(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num()/3; ++i) { Dtype mdist = ::max(margin + dist_sq_ap_.cpu_data()[i] - dist_sq_an_.cpu_data()[i], Dtype(0.0)); loss += mdist; if (mdist < Dtype(1e-6)) { //prepare for backward pass caffe_gpu_set(channels, Dtype(0), diff_ap_.mutable_gpu_data() + (i*channels)); caffe_gpu_set(channels, Dtype(0), diff_an_.mutable_gpu_data() + (i*channels)); caffe_gpu_set(channels, Dtype(0), diff_pn_.mutable_gpu_data() + (i*channels)); } } loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void TripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int num = bottom[0]->num() / 3; int channels = bottom[0]->channels(); Dtype* anchor_out = bottom[0]->mutable_gpu_diff(); Dtype* pos_out = bottom[0]->mutable_gpu_diff() + bottom[0]->count() / 3; Dtype* neg_out = bottom[0]->mutable_gpu_diff() + 2 * bottom[0]->count() / 3; Dtype lamda = this->layer_param_.triplet_loss_param().lamda(); const Dtype alpha = lamda * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num()); for (int j = 0; j < num; ++j) { //anchor caffe_gpu_axpby(channels, alpha*Dtype(-1), diff_pn_.gpu_data() + (j*channels), Dtype(0.0), anchor_out + (j*channels)); //positive caffe_gpu_axpby(channels, alpha*Dtype(-1), diff_ap_.gpu_data() + (j*channels), Dtype(0.0), pos_out + (j*channels)); //negitive caffe_gpu_axpby(channels, alpha, diff_an_.gpu_data() + (j*channels), Dtype(0.0), neg_out + (j*channels)); } } INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); } // namespace caffe
05a5c906f864df496749f505b44b50b7757e70c3.cu
/* * triplet_loss_layer.cu * */ #include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/triplet_loss_layer.hpp" namespace caffe { template <typename Dtype> void TripletLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count() / 3; const Dtype * anchor_data = bottom[0]->gpu_data(); const Dtype * pos_data = bottom[0]->gpu_data() + count; const Dtype * neg_data = bottom[0]->gpu_data() + 2 * count; const Dtype *label = bottom[1]->cpu_data(); for (int i = 0; i < bottom[1]->num() / 3; i++) { CHECK_EQ(label[i], label[i + bottom[1]->num() / 3]); CHECK_NE(label[i], label[i + 2 * bottom[1]->num() / 3]); } //const Dtype* sampleW = bottom[3]->cpu_data(); //const Dtype sampleW = Dtype(1); caffe_gpu_sub( count, anchor_data, // a pos_data, // p diff_ap_.mutable_gpu_data()); // a_i-p_i caffe_gpu_sub( count, anchor_data, // a neg_data, // n diff_an_.mutable_gpu_data()); // a_i-n_i caffe_gpu_sub( count, pos_data, // p neg_data, // n diff_pn_.mutable_gpu_data()); // p_i-n_i const int channels = bottom[0]->channels(); caffe_gpu_powx( count, diff_ap_.mutable_gpu_data(), // a_i-p_i Dtype(2), diff_sq_ap_.mutable_gpu_data()); // (a_i-p_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num() / 3, bottom[0]->channels(), Dtype(1.0), //alpha diff_sq_ap_.gpu_data(), // (a_i-p_i)^2 // A summer_vec_.gpu_data(), // x Dtype(0.0), //belta dist_sq_ap_.mutable_gpu_data()); // \Sum (a_i-p_i)^2 //y caffe_gpu_powx( count, diff_an_.mutable_gpu_data(), // a_i-n_i Dtype(2), diff_sq_an_.mutable_gpu_data()); // (a_i-n_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num() / 3, bottom[0]->channels(), Dtype(1.0), //alpha diff_sq_an_.gpu_data(), // (a_i-n_i)^2 // A summer_vec_.gpu_data(), // x Dtype(0.0), //belta dist_sq_an_.mutable_gpu_data()); // \Sum (a_i-n_i)^2 //y Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype 
loss(0.0); for (int i = 0; i < bottom[0]->num()/3; ++i) { Dtype mdist = std::max(margin + dist_sq_ap_.cpu_data()[i] - dist_sq_an_.cpu_data()[i], Dtype(0.0)); loss += mdist; if (mdist < Dtype(1e-6)) { //prepare for backward pass caffe_gpu_set(channels, Dtype(0), diff_ap_.mutable_gpu_data() + (i*channels)); caffe_gpu_set(channels, Dtype(0), diff_an_.mutable_gpu_data() + (i*channels)); caffe_gpu_set(channels, Dtype(0), diff_pn_.mutable_gpu_data() + (i*channels)); } } loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void TripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int num = bottom[0]->num() / 3; int channels = bottom[0]->channels(); Dtype* anchor_out = bottom[0]->mutable_gpu_diff(); Dtype* pos_out = bottom[0]->mutable_gpu_diff() + bottom[0]->count() / 3; Dtype* neg_out = bottom[0]->mutable_gpu_diff() + 2 * bottom[0]->count() / 3; Dtype lamda = this->layer_param_.triplet_loss_param().lamda(); const Dtype alpha = lamda * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num()); for (int j = 0; j < num; ++j) { //anchor caffe_gpu_axpby(channels, alpha*Dtype(-1), diff_pn_.gpu_data() + (j*channels), Dtype(0.0), anchor_out + (j*channels)); //positive caffe_gpu_axpby(channels, alpha*Dtype(-1), diff_ap_.gpu_data() + (j*channels), Dtype(0.0), pos_out + (j*channels)); //negitive caffe_gpu_axpby(channels, alpha, diff_an_.gpu_data() + (j*channels), Dtype(0.0), neg_out + (j*channels)); } } INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); } // namespace caffe
c464e734d6bb1f0c7cff248900c4df214cbbed3b.hip
// !!! This is a file automatically generated by hipify!!! // generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width.cuinl" template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue, const ConvParam& param, float alpha, float beta, hipStream_t stream);
c464e734d6bb1f0c7cff248900c4df214cbbed3b.cu
// generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width.cuinl" template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue, const ConvParam& param, float alpha, float beta, cudaStream_t stream);
2d18cfcf5add20a3242542e63cd62a80428504a7.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THHUNN/generic/GatedLinearUnit.hip" #else void THNN_(GatedLinear_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int dim) { THCUNN_assertSameGPU(state, 2, input, output); // size output to half of input dim = dim - TH_INDEX_BASE; const int64_t nIn = THCTensor_(sizeLegacyNoScalars)(state, input, dim); THArgCheck(nIn % 2 == 0, 2, "Halving dimension must be even. Dim %d is size %ld", dim + TH_INDEX_BASE, nIn); const int64_t inputSize = THCTensor_(size)(state, input, dim) / 2; std::vector<int64_t> newSizes = THTensor_sizesLegacyNoScalars(input); newSizes[dim] = inputSize; THCTensor_(resize)(state, output, newSizes, {}); // halve tensor THCTensor *firstHalf = THCTensor_(newNarrow)(state, input, dim, 0, inputSize); THCTensor *secondHalf = THCTensor_(newNarrow)(state, input, dim, inputSize, inputSize); // x = x1:cmul( sigmoid(x2) ) THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, output, secondHalf, firstHalf, gatedLinearCSigMul_functor<scalar_t, accreal>()); THCTensor_(free)(state, firstHalf); THCTensor_(free)(state, secondHalf); } void THNN_(GatedLinear_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int dim) { THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); dim = dim - TH_INDEX_BASE; const int64_t nIn = THCTensor_(size)(state, input, dim); THArgCheck(nIn % 2 == 0, 2, "Halving dimension must be even. 
Dim %d is size %ld", dim + TH_INDEX_BASE, nIn); THCTensor_(resizeAs)(state, gradInput, input); const int64_t inputSize = THCTensor_(size)(state, input, dim) / 2; THCTensor *firstHalf = THCTensor_(newNarrow)(state, input, dim, 0, inputSize); THCTensor *gradInputfirstHalf = THCTensor_(newNarrow)(state, gradInput, dim, 0, inputSize); const int64_t stride_i = THCTensor_(stride)(state, input, dim) * inputSize; const int64_t stride_gI = THCTensor_(stride)(state, gradInput, dim) * inputSize; THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInputfirstHalf, gradOutput, firstHalf, gatedLinearDerivative<scalar_t,accreal>(stride_i, stride_gI)); THCTensor_(free)(state, firstHalf); THCTensor_(free)(state, gradInputfirstHalf); } #endif
2d18cfcf5add20a3242542e63cd62a80428504a7.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THCUNN/generic/GatedLinearUnit.cu" #else void THNN_(GatedLinear_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int dim) { THCUNN_assertSameGPU(state, 2, input, output); // size output to half of input dim = dim - TH_INDEX_BASE; const int64_t nIn = THCTensor_(sizeLegacyNoScalars)(state, input, dim); THArgCheck(nIn % 2 == 0, 2, "Halving dimension must be even. Dim %d is size %ld", dim + TH_INDEX_BASE, nIn); const int64_t inputSize = THCTensor_(size)(state, input, dim) / 2; std::vector<int64_t> newSizes = THTensor_sizesLegacyNoScalars(input); newSizes[dim] = inputSize; THCTensor_(resize)(state, output, newSizes, {}); // halve tensor THCTensor *firstHalf = THCTensor_(newNarrow)(state, input, dim, 0, inputSize); THCTensor *secondHalf = THCTensor_(newNarrow)(state, input, dim, inputSize, inputSize); // x = x1:cmul( sigmoid(x2) ) THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, output, secondHalf, firstHalf, gatedLinearCSigMul_functor<scalar_t, accreal>()); THCTensor_(free)(state, firstHalf); THCTensor_(free)(state, secondHalf); } void THNN_(GatedLinear_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int dim) { THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); dim = dim - TH_INDEX_BASE; const int64_t nIn = THCTensor_(size)(state, input, dim); THArgCheck(nIn % 2 == 0, 2, "Halving dimension must be even. 
Dim %d is size %ld", dim + TH_INDEX_BASE, nIn); THCTensor_(resizeAs)(state, gradInput, input); const int64_t inputSize = THCTensor_(size)(state, input, dim) / 2; THCTensor *firstHalf = THCTensor_(newNarrow)(state, input, dim, 0, inputSize); THCTensor *gradInputfirstHalf = THCTensor_(newNarrow)(state, gradInput, dim, 0, inputSize); const int64_t stride_i = THCTensor_(stride)(state, input, dim) * inputSize; const int64_t stride_gI = THCTensor_(stride)(state, gradInput, dim) * inputSize; THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInputfirstHalf, gradOutput, firstHalf, gatedLinearDerivative<scalar_t,accreal>(stride_i, stride_gI)); THCTensor_(free)(state, firstHalf); THCTensor_(free)(state, gradInputfirstHalf); } #endif
e26eba4b45d6121646698e54117709078353f457.hip
// !!! This is a file automatically generated by hipify!!! #include <limits> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/AccumulateType.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Math.cuh> #include <ATen/native/hip/zmath.cuh> namespace at { namespace native { // We manually overload angle because std::angle does not work with std::thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t angle_wrapper(scalar_t v) { return 0; } template<typename T> __host__ __device__ static inline thrust::complex<T> angle_wrapper(thrust::complex<T> v) { return thrust::arg(v); } void angle_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "angle_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return angle_wrapper(a); }); }); } // We manually overload real because std::real does not work with std::thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t real_wrapper(scalar_t v) { return v; } template<typename T> __host__ __device__ static inline thrust::complex<T> real_wrapper(thrust::complex<T> v) { return v.real(); } void real_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "real_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return real_wrapper(a); }); }); } // We manually overload imag because std::imag does not work with std::thrust::complex types. 
template<typename scalar_t> __host__ __device__ static inline scalar_t imag_wrapper(scalar_t v) { return 0; } template<typename T> __host__ __device__ static inline thrust::complex<T> imag_wrapper(thrust::complex<T> v) { return v.imag(); } void imag_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "imag_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return imag_wrapper(a); }); }); } // We manually overload conj because std::conj does not work with std::thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t conj_wrapper(scalar_t v) { return v; } template<typename T> __host__ __device__ static inline thrust::complex<T> conj_wrapper(thrust::complex<T> v) { return thrust::conj(v); } void conj_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "conj_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return conj_wrapper(a); }); }); } REGISTER_DISPATCH(angle_stub, &angle_kernel_cuda); REGISTER_DISPATCH(real_stub, &real_kernel_cuda); REGISTER_DISPATCH(imag_stub, &imag_kernel_cuda); REGISTER_DISPATCH(conj_stub, &conj_kernel_cuda); }} // namespace at::native
e26eba4b45d6121646698e54117709078353f457.cu
#include <limits> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/AccumulateType.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Math.cuh> #include <ATen/native/cuda/zmath.cuh> namespace at { namespace native { // We manually overload angle because std::angle does not work with std::thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t angle_wrapper(scalar_t v) { return 0; } template<typename T> __host__ __device__ static inline thrust::complex<T> angle_wrapper(thrust::complex<T> v) { return thrust::arg(v); } void angle_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "angle_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return angle_wrapper(a); }); }); } // We manually overload real because std::real does not work with std::thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t real_wrapper(scalar_t v) { return v; } template<typename T> __host__ __device__ static inline thrust::complex<T> real_wrapper(thrust::complex<T> v) { return v.real(); } void real_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "real_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return real_wrapper(a); }); }); } // We manually overload imag because std::imag does not work with std::thrust::complex types. 
template<typename scalar_t> __host__ __device__ static inline scalar_t imag_wrapper(scalar_t v) { return 0; } template<typename T> __host__ __device__ static inline thrust::complex<T> imag_wrapper(thrust::complex<T> v) { return v.imag(); } void imag_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "imag_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return imag_wrapper(a); }); }); } // We manually overload conj because std::conj does not work with std::thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t conj_wrapper(scalar_t v) { return v; } template<typename T> __host__ __device__ static inline thrust::complex<T> conj_wrapper(thrust::complex<T> v) { return thrust::conj(v); } void conj_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "conj_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return conj_wrapper(a); }); }); } REGISTER_DISPATCH(angle_stub, &angle_kernel_cuda); REGISTER_DISPATCH(real_stub, &real_kernel_cuda); REGISTER_DISPATCH(imag_stub, &imag_kernel_cuda); REGISTER_DISPATCH(conj_stub, &conj_kernel_cuda); }} // namespace at::native
aa0f92706aa2bd2c470982ccfe8e0dda6d00f106.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2016 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <rocblas.h> #include "headers.h" hipblasHandle_t cublasHandle; /* kernel to update delta2 parameter */ __global__ void k_updateDelta2( floatType_t *delta2, floatType_t const *z2, int const Xexamples, int const size ) { /* setup global threadID in X and Y directions */ int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; if( tidy < Xexamples && tidx < size ) { /* calculate the offset properly */ delta2[INDX(tidx,tidy,size)] *= z2[INDX(tidy,tidx,Xexamples)]; } /* end if */ } /* end k_updateDelta2 */ /* kernel for calculating the sigmoid of an array */ __global__ void k_sigmoid_f( floatType_t *array, int const size ) { /* setup global threadID in X direction */ int tid = blockDim.x * blockIdx.x + threadIdx.x; if( tid < size ) { /* use the sigmoid_f function to complete this loop */ array[tid] = sigmoid_f( array[tid] ); } /* end if */ } /* end sigmoidGradient */ /* kernel for calculating the gradient of the sigmoid function */ __global__ void k_sigmoidGradient_f( floatType_t *array, int const size ) { /* setup global threadID in X direction */ int tid = blockDim.x * blockIdx.x + threadIdx.x; if( tid < size ) { /* use the sigmoidGradient_f function to complete this loop */ array[tid] = 
sigmoidGradient_f( array[tid] ); } /* end if */ } /* end sigmoidGradient */ /* kernel to set the delta3 vector from Y properly delta3 is just the different between the calculated value a3 and the true value Y3 */ __global__ void setDelta3Vec( floatType_t *delta3, floatType_t const *Y, floatType_t const *a3, int const Xexamples ) { /* setup global threadID in X direction */ int tid = blockDim.x * blockIdx.x + threadIdx.x; if( tid < Xexamples ) { delta3[INDX((int)Y[tid],tid,11)] = (floatType_t) 1.0; for( int j = 0; j < 10; j++ ) { delta3[INDX(j+1,tid,11)] = a3[INDX(tid,j,Xexamples)] - delta3[INDX(j+1,tid,11)]; } /* end for j */ } return; } /* end setDelta3Vec */ /* init the array to 1.0. used to set the bias term */ __global__ void initOne( int size, floatType_t *array ) { /* setup global threadID in X direction */ int tid = blockDim.x * blockIdx.x + threadIdx.x; if( tid < size ) array[tid] = (floatType_t) 1.0; return; } /* end initOne */ /* debugging kernel for printing values */ __global__ void printKernel( int rows, int cols, floatType_t *array ) { for( int j = 0; j < cols; j++ ) { for( int i = 0; i < rows; i++ ) { printf("row %d col %d value %e\n",i,j,array[INDX(i,j,rows)] ); } /* end for */ } /* end for */ } /* end print Kernel */ /* debugging for printing on host */ void printHost( int rows, int cols, floatType_t *array ) { for( int j = 0; j < cols; j++ ) { for( int i = 0; i < rows; i++ ) { printf("row %d col %d value %e\n",i,j,array[INDX(i,j,rows)] ); } /* end for */ } /* end for */ } /* end print Kernel */ /* main function to train the network */ void trainNetwork( floatType_t *X, int const Xexamples, int const Xfeatures, floatType_t *theta1, int const theta1Rows, int const theta1Cols, floatType_t *theta2, int const theta2Rows, int const theta2Cols, floatType_t const *Y, float const learningRate, int const iterations, int const batchSize ) { floatType_t lambda = learningRate; floatType_t cost; checkCUBLAS( hipblasCreate( &cublasHandle ) ); /* allocate large 
GPU space for temporary arrays */ floatType_t *d_tempMatrix; checkCUDA( hipMalloc( &d_tempMatrix, sizeof(floatType_t) * ( Xexamples * (theta1Rows+1) + //z2 Xexamples * (theta1Rows+1) + //a2 Xexamples * (theta2Rows+1) + //a3 Xexamples * (theta1Rows+1) + //delta2 Xexamples * 11 ) ) ); //delta3 /* set the bias term to 1, sort of like the y-intercept */ for( int i = 0; i < Xexamples; i++ ) { X[INDX(0,i,Xfeatures)] = (floatType_t) 1.0; } /* end for */ /* malloc all arrays I need on the device and copy data from the host */ floatType_t *d_X; checkCUDA( hipMalloc( &d_X, sizeof(floatType_t)*Xexamples*Xfeatures)); checkCUDA( hipMemcpy( d_X, X, sizeof(floatType_t)*Xexamples*Xfeatures, hipMemcpyHostToDevice ) ); floatType_t *d_Y; checkCUDA( hipMalloc( &d_Y, sizeof(floatType_t)*Xexamples) ); checkCUDA( hipMemcpy( d_Y, Y, sizeof(floatType_t)*Xexamples, hipMemcpyHostToDevice ) ); /* theta1 and theta2 are the weights in the 1st and second layers */ floatType_t *d_theta1; checkCUDA( hipMalloc( &d_theta1, sizeof(floatType_t) * theta1Rows * theta1Cols ) ); checkCUDA( hipMemcpy( d_theta1, theta1, sizeof(floatType_t)*theta1Rows*theta1Cols, hipMemcpyHostToDevice ) ); floatType_t *d_theta2; checkCUDA( hipMalloc( &d_theta2, sizeof(floatType_t) * theta2Rows * theta2Cols ) ); checkCUDA( hipMemcpy( d_theta2, theta2, sizeof(floatType_t)*theta2Rows*theta2Cols, hipMemcpyHostToDevice ) ); /* theta1Grad and theta2Grad are the gradients for theta1 and theta2 */ floatType_t *d_theta1Grad, *d_theta2Grad; checkCUDA( hipMalloc( &d_theta1Grad, sizeof(floatType_t)*theta1Rows*theta1Cols ) ); checkCUDA( hipMalloc( &d_theta2Grad, sizeof(floatType_t)*theta2Rows*theta2Cols ) ); /* stochastic gradient descent in our case stochastic because the data is already scrambled */ int iter = 0; /* big while loop over the number of iterations to train */ while(iter < iterations ) { /* for loop over the batch size */ for( int j = 0; j < Xexamples; j+=batchSize ) { int tempBatchSize = min( batchSize, Xexamples - j ); /* 
bulk of computation here */ calcGradient( &d_X[INDX(0,j,Xfeatures)], tempBatchSize, Xfeatures, d_theta1, theta1Rows, theta1Cols, d_theta2, theta2Rows, theta2Cols, &d_Y[j], &cost, d_theta1Grad, d_theta2Grad, d_tempMatrix ); floatType_t alpha = -lambda; /* update the weights with the newly calculated gradients */ checkCUBLAS( hipblasSaxpy( cublasHandle, theta1Rows*theta1Cols, &alpha, d_theta1Grad, 1, d_theta1, 1 ) ); checkCUBLAS( hipblasSaxpy( cublasHandle, theta2Rows*theta2Cols, &alpha, d_theta2Grad, 1, d_theta2, 1 ) ); } /* end for */ iter++; printf("|"); fflush(stdout); if( iter % 72 == 0 ) printf("\n"); } /* end while */ /* copy theta1 and theta2 back to the host to use for prediction later */ checkCUDA( hipMemcpy( theta1, d_theta1, sizeof(floatType_t)*theta1Rows*theta1Cols, hipMemcpyDeviceToHost ) ); checkCUDA( hipMemcpy( theta2, d_theta2, sizeof(floatType_t)*theta2Rows*theta2Cols, hipMemcpyDeviceToHost ) ); // printf("\nFinal cost value %.3e\n",cost); printf("\n"); checkCUDA( hipFree( d_tempMatrix ) ); checkCUDA( hipFree( d_X ) ); checkCUDA( hipFree( d_Y ) ); checkCUDA( hipFree( d_theta1 ) ); checkCUDA( hipFree( d_theta2 ) ); checkCUDA( hipFree( d_theta1Grad ) ); checkCUDA( hipFree( d_theta2Grad ) ); } /* end trainNetwork */ void calcGradient( floatType_t *d_X, int const Xexamples, int const Xfeatures, floatType_t const *d_theta1, int const theta1Rows, int const theta1Cols, floatType_t const *d_theta2, int const theta2Rows, int const theta2Cols, floatType_t const *d_Y, floatType_t *cost, floatType_t *d_theta1Grad, floatType_t *d_theta2Grad, floatType_t *d_tempMatrix ) { floatType_t *d_z2, *d_a2, *d_a3, *d_delta3, *d_delta2; /* take tempMatrix and partition it up for use */ d_z2 = d_tempMatrix; d_a2 = &d_z2[Xexamples*(theta1Rows+1)]; d_a3 = &d_a2[Xexamples*(theta1Rows+1)]; d_delta2 = &d_a3[Xexamples*(theta2Rows+1)]; d_delta3 = &d_delta2[Xexamples*(theta1Rows+1)]; float alpha = 1.0; float beta = 0.0; if( sizeof( floatType_t ) == 4 ) { /* calculate X * theta1 to 
give z2 */ checkCUBLAS( hipblasSgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_T, Xexamples, theta1Rows, theta1Cols, &alpha, d_X, Xfeatures, d_theta1, theta1Rows, &beta, &d_z2[INDX(0,1,Xexamples)], Xexamples ) ); /* copy z2 into a2 */ checkCUDA( hipMemcpy( d_a2, d_z2, sizeof(floatType_t) * Xexamples * (theta1Rows+1), hipMemcpyDeviceToDevice ) ); /* calculate a2 = sigmoid(z2), the activation */ dim3 threads1(256,1,1); dim3 blocks1( Xexamples*(theta1Rows+1)/threads1.x + 1, 1, 1); hipLaunchKernelGGL(( k_sigmoid_f), dim3(blocks1), dim3(threads1) , 0, 0, d_a2, Xexamples*(theta1Rows+1) ); checkKERNEL() } /* end if */ else { } /* end else */ /* add a 1.0 to the beginning of each a2 vector for bias term */ hipLaunchKernelGGL(( initOne), dim3(Xexamples/256 + 1), dim3(256) , 0, 0, Xexamples, d_a2 ); checkKERNEL() if( sizeof( floatType_t ) == 4 ) { /* calculated z3 = a2 * theta2. put in a3 array space since we don't need z3 for anything else */ checkCUBLAS( hipblasSgemm( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, Xexamples, theta2Rows, theta2Cols, &alpha, d_a2, Xexamples, d_theta2, theta2Rows, &beta, d_a3, Xexamples ) ); /* calculate a3 = sigmoid(z3), the activation */ dim3 threads1(256,1,1); dim3 blocks1( Xexamples*(theta2Rows+1)/threads1.x + 1, 1, 1); hipLaunchKernelGGL(( k_sigmoid_f), dim3(blocks1), dim3(threads1) , 0, 0, d_a3, Xexamples*(theta2Rows+1) ); checkKERNEL() } /* end if */ else { } /* end else */ /* enable the following code if you wish to calculate the forward cost not strictly necessary to generate the gradients WARNING THIS IS BROKEN RIGHT NOW ON THE GPU!!! 
*/ #if 0 floatType_t jTemp = 0.0; for( int row = 0; row < Xexamples; row++ ) { memset( yTemp, 0, sizeof(floatType_t) * 11 ); yTemp[ (int)Y[row] ] = (floatType_t) 1.0; for( int j = 1; j <= theta2Rows; j++ ) { jTemp += -log( a3[INDX(row,j-1,Xexamples)] ) * yTemp[j] - ( log( (floatType_t) 1.0 - a3[INDX(row,j-1,Xexamples)] ) * ( (floatType_t) 1.0 - yTemp[j] ) ) ; } /* end for */ } /* end for */ jTemp /= (floatType_t) Xexamples; *cost = jTemp; #endif checkCUDA( hipMemset( d_delta3, 0, sizeof(floatType_t)*11*Xexamples ) ); /* set delta3 to be the difference between a3 and y, the calculated versus the actual values */ hipLaunchKernelGGL(( setDelta3Vec), dim3(Xexamples/256+1), dim3(256) , 0, 0, d_delta3, d_Y, d_a3, Xexamples ); checkKERNEL() if( sizeof( floatType_t ) == 4 ) { /* calculated delta2 = theta2 * delta3 */ checkCUBLAS( hipblasSgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, theta2Cols, Xexamples, theta2Rows, &alpha, (float *)d_theta2, theta2Rows, (float *)&d_delta3[1], 11, &beta, (float *)d_delta2, theta1Rows+1 ) ); /* calculate the sigmoid gradient of z2 */ dim3 threads(256,1,1); dim3 blocks(Xexamples*(theta1Rows+1)+1/threads.x,1,1); hipLaunchKernelGGL(( k_sigmoidGradient_f), dim3(blocks), dim3(threads) , 0, 0, d_z2, Xexamples*(theta1Rows+1) ); checkKERNEL() /* update delta2 with the sigmoid gradient of z2 */ dim3 t1(32,32,1); dim3 b1((theta1Rows+1)/t1.x + 1, Xexamples/t1.y + 1, 1 ); hipLaunchKernelGGL(( k_updateDelta2), dim3(b1), dim3(t1) , 0, 0, d_delta2, d_z2, Xexamples, theta1Rows+1 ); checkKERNEL() floatType_t recip = (floatType_t) 1.0 / (floatType_t) Xexamples; /* calculate theta1Grad = delta2 * X */ checkCUBLAS( hipblasSgemm( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, theta1Rows, theta1Cols, Xexamples, &recip, (float *)&d_delta2[1], theta1Rows+1, (float *)d_X, Xfeatures, &beta, (float *)d_theta1Grad, theta1Rows ) ); /* calculate theta2Grad = delta3 * a2 */ checkCUBLAS( hipblasSgemm( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, theta2Rows, theta2Cols, 
Xexamples, &recip, (float *)&d_delta3[1], 11, (float *)d_a2, Xexamples, &beta, (float *)d_theta2Grad, theta2Rows ) ); } /* end if */ else { } /* end else */ } /* end calcGradient */ void predict(floatType_t *X, int const Xexamples, int const Xfeatures, floatType_t const *theta1, int const theta1Rows, int const theta1Cols, floatType_t const *theta2, int const theta2Rows, int const theta2Cols, int *predictVector) { floatType_t *tempMatrix, *z2, *a2, *a3; /* add the bias term to the X training set data */ for( int i = 0; i < Xexamples; i++ ) X[INDX(0,i,Xfeatures)] = (floatType_t) 1.0; /* malloc the temp space */ tempMatrix = (floatType_t *) malloc( sizeof(floatType_t) * ( Xexamples * (theta1Rows+1) + Xexamples * (theta1Rows+1) + Xexamples * (theta2Rows+1) ) ); /* carve up the temp space */ z2 = tempMatrix; a2 = &z2[INDX(Xexamples,theta1Rows,Xexamples)]; a3 = &a2[INDX(Xexamples,theta1Rows+1,Xexamples)]; if( sizeof( floatType_t ) == 4 ) { /* calculate z2 */ cblas_sgemm( CblasColMajor, CblasTrans, CblasTrans, Xexamples, theta1Rows, theta1Cols, 1.0f, (float *) X, Xfeatures, (float *) theta1, theta1Rows, 0.0f, (float *) &z2[INDX(0,1,Xexamples)], Xexamples ); /* calculate a2 */ for( int j = 1; j < theta1Rows+1; j++ ) for( int i = 0; i < Xexamples; i++ ) a2[INDX(i,j,Xexamples)] = sigmoid_f( z2[INDX(i,j,Xexamples)] ); } /* end if */ else { } /* end else */ /* add the bias term to a2 */ for( int i = 0; i < Xexamples; i++ ) a2[INDX(i,0,Xexamples)] = (floatType_t) 1.0; if( sizeof( floatType_t ) == 4 ) { /* calculate z3 */ cblas_sgemm( CblasColMajor, CblasNoTrans, CblasTrans, Xexamples, theta2Rows, theta2Cols, 1.0f, (float *) a2, Xexamples, (float *) theta2, theta2Rows, 0.0f, (float *) a3, Xexamples ); /* calculate a3 */ for( int j = 0; j < theta2Rows; j++ ) for( int i = 0; i < Xexamples; i++ ) a3[INDX(i,j,Xexamples)] = sigmoid_f( a3[INDX(i,j,Xexamples)] ); } /* end if */ else { } /* end else */ /* use a3 to populate the prediction vector. 
each element will be a digit between one and ten, which is the predicted value of the image */ for( int row = 0; row < Xexamples; row++ ) { floatType_t max = -99.0; int idx = -10; for( int i = 0; i < 10; i++ ) { if( a3[INDX(row,i,Xexamples)] > max ) { max = a3[INDX(row,i,Xexamples)]; idx = i+1; } /* end if */ } /* end for i */ predictVector[row] = idx; } /* end row */ free(tempMatrix); } /* end predict */ void readCommandLineArgs( int argc, char *argv[], float *learningRate, int *batchSize, int *iterations, int *sizeHiddenLayer ) { /* read command line input */ switch( argc ) { case 1: *learningRate = 0.3; *batchSize = 50; *iterations = 1; *sizeHiddenLayer = 25; break; case 2: if( strcmp( argv[1],"-h" ) == 0 ) { printf("Usage: ./x.nn -h for this message\n"); printf("Usage: ./x.nn <learningRate:float> <batchSize:int> <iterations:int> <hiddenLayerSize:int>\n"); exit(911); } /* end for */ break; case 5: *learningRate = atof( argv[1] ); if( *learningRate == 0.0f ) { printf("Invalid learning rate %s\n", argv[1] ); *learningRate = 0.3; printf("Defaulting to %e\n", *learningRate ); } /* end if */ *batchSize = atoi( argv[2] ); if( *batchSize <= 0 ) { printf("Invalid batchSize %s\n", argv[2] ); *batchSize = 50; printf("Defaulting to %d\n",*batchSize ); } /* end if */ *iterations = atoi( argv[3] ); if( *iterations <= 0 ) { printf("Invalid iteration size %s\n", argv[3] ); *iterations = 1; printf("Defaulting to %d\n",*iterations); } /* end if */ *sizeHiddenLayer = atoi( argv[4] ); if( *sizeHiddenLayer <= 0 ) { printf("Invalid hidden layer size %s\n", argv[4] ); *sizeHiddenLayer = 25; printf("Defaulting to %d\n",*sizeHiddenLayer ); } /* end if */ break; default: printf("Undefined command-line args\n"); printf("Usage: ./x.nn -h for this message\n"); printf("Usage: ./x.nn <learningRate:float> <batchSize:int> <iterations:int> <hiddenLayerSize:int>\n"); exit(911); break; } /* end switch */ /* print some initial stuff */ printf("Learning rate lambda is %.3e\n",*learningRate); 
printf("Batchsize is %d\n",*batchSize); printf("Number of iterations is %d\n",*iterations); printf("Hidden Layer Size is %d\n",*sizeHiddenLayer); } /* end readCommandLineArgs */ void readMatrixFromFile( char *fileName, float *matrix, int const rows, int const cols, int const ld ) { FILE *ifp; ifp = fopen( fileName, "r" ); if( ifp == NULL ) { fprintf(stderr, "Error opening file %s\n", fileName); exit(911); } /* end if */ for( int col = 0; col < cols; col++ ) { for( int row = 0; row < rows; row++ ) { if( !fscanf( ifp, "%f", &matrix[ INDX( row, col, ld ) ] ) ) { fprintf(stderr,"error reading training matrix file \n"); exit(911); } /* end if */ } /* end for row */ } /* end for col */ fclose(ifp); return; } /* end readMatrixFromFile */
aa0f92706aa2bd2c470982ccfe8e0dda6d00f106.cu
/* * Copyright 2016 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cublas_v2.h> #include "headers.h" cublasHandle_t cublasHandle; /* kernel to update delta2 parameter */ __global__ void k_updateDelta2( floatType_t *delta2, floatType_t const *z2, int const Xexamples, int const size ) { /* setup global threadID in X and Y directions */ int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; if( tidy < Xexamples && tidx < size ) { /* calculate the offset properly */ delta2[INDX(tidx,tidy,size)] *= z2[INDX(tidy,tidx,Xexamples)]; } /* end if */ } /* end k_updateDelta2 */ /* kernel for calculating the sigmoid of an array */ __global__ void k_sigmoid_f( floatType_t *array, int const size ) { /* setup global threadID in X direction */ int tid = blockDim.x * blockIdx.x + threadIdx.x; if( tid < size ) { /* use the sigmoid_f function to complete this loop */ array[tid] = sigmoid_f( array[tid] ); } /* end if */ } /* end sigmoidGradient */ /* kernel for calculating the gradient of the sigmoid function */ __global__ void k_sigmoidGradient_f( floatType_t *array, int const size ) { /* setup global threadID in X direction */ int tid = blockDim.x * blockIdx.x + threadIdx.x; if( tid < size ) { /* use the sigmoidGradient_f function to complete this loop */ array[tid] = sigmoidGradient_f( array[tid] ); } /* end if */ } /* end sigmoidGradient */ /* kernel to set the 
delta3 vector from Y properly delta3 is just the different between the calculated value a3 and the true value Y3 */ __global__ void setDelta3Vec( floatType_t *delta3, floatType_t const *Y, floatType_t const *a3, int const Xexamples ) { /* setup global threadID in X direction */ int tid = blockDim.x * blockIdx.x + threadIdx.x; if( tid < Xexamples ) { delta3[INDX((int)Y[tid],tid,11)] = (floatType_t) 1.0; for( int j = 0; j < 10; j++ ) { delta3[INDX(j+1,tid,11)] = a3[INDX(tid,j,Xexamples)] - delta3[INDX(j+1,tid,11)]; } /* end for j */ } return; } /* end setDelta3Vec */ /* init the array to 1.0. used to set the bias term */ __global__ void initOne( int size, floatType_t *array ) { /* setup global threadID in X direction */ int tid = blockDim.x * blockIdx.x + threadIdx.x; if( tid < size ) array[tid] = (floatType_t) 1.0; return; } /* end initOne */ /* debugging kernel for printing values */ __global__ void printKernel( int rows, int cols, floatType_t *array ) { for( int j = 0; j < cols; j++ ) { for( int i = 0; i < rows; i++ ) { printf("row %d col %d value %e\n",i,j,array[INDX(i,j,rows)] ); } /* end for */ } /* end for */ } /* end print Kernel */ /* debugging for printing on host */ void printHost( int rows, int cols, floatType_t *array ) { for( int j = 0; j < cols; j++ ) { for( int i = 0; i < rows; i++ ) { printf("row %d col %d value %e\n",i,j,array[INDX(i,j,rows)] ); } /* end for */ } /* end for */ } /* end print Kernel */ /* main function to train the network */ void trainNetwork( floatType_t *X, int const Xexamples, int const Xfeatures, floatType_t *theta1, int const theta1Rows, int const theta1Cols, floatType_t *theta2, int const theta2Rows, int const theta2Cols, floatType_t const *Y, float const learningRate, int const iterations, int const batchSize ) { floatType_t lambda = learningRate; floatType_t cost; checkCUBLAS( cublasCreate( &cublasHandle ) ); /* allocate large GPU space for temporary arrays */ floatType_t *d_tempMatrix; checkCUDA( cudaMalloc( &d_tempMatrix, 
sizeof(floatType_t) * ( Xexamples * (theta1Rows+1) + //z2 Xexamples * (theta1Rows+1) + //a2 Xexamples * (theta2Rows+1) + //a3 Xexamples * (theta1Rows+1) + //delta2 Xexamples * 11 ) ) ); //delta3 /* set the bias term to 1, sort of like the y-intercept */ for( int i = 0; i < Xexamples; i++ ) { X[INDX(0,i,Xfeatures)] = (floatType_t) 1.0; } /* end for */ /* malloc all arrays I need on the device and copy data from the host */ floatType_t *d_X; checkCUDA( cudaMalloc( &d_X, sizeof(floatType_t)*Xexamples*Xfeatures)); checkCUDA( cudaMemcpy( d_X, X, sizeof(floatType_t)*Xexamples*Xfeatures, cudaMemcpyHostToDevice ) ); floatType_t *d_Y; checkCUDA( cudaMalloc( &d_Y, sizeof(floatType_t)*Xexamples) ); checkCUDA( cudaMemcpy( d_Y, Y, sizeof(floatType_t)*Xexamples, cudaMemcpyHostToDevice ) ); /* theta1 and theta2 are the weights in the 1st and second layers */ floatType_t *d_theta1; checkCUDA( cudaMalloc( &d_theta1, sizeof(floatType_t) * theta1Rows * theta1Cols ) ); checkCUDA( cudaMemcpy( d_theta1, theta1, sizeof(floatType_t)*theta1Rows*theta1Cols, cudaMemcpyHostToDevice ) ); floatType_t *d_theta2; checkCUDA( cudaMalloc( &d_theta2, sizeof(floatType_t) * theta2Rows * theta2Cols ) ); checkCUDA( cudaMemcpy( d_theta2, theta2, sizeof(floatType_t)*theta2Rows*theta2Cols, cudaMemcpyHostToDevice ) ); /* theta1Grad and theta2Grad are the gradients for theta1 and theta2 */ floatType_t *d_theta1Grad, *d_theta2Grad; checkCUDA( cudaMalloc( &d_theta1Grad, sizeof(floatType_t)*theta1Rows*theta1Cols ) ); checkCUDA( cudaMalloc( &d_theta2Grad, sizeof(floatType_t)*theta2Rows*theta2Cols ) ); /* stochastic gradient descent in our case stochastic because the data is already scrambled */ int iter = 0; /* big while loop over the number of iterations to train */ while(iter < iterations ) { /* for loop over the batch size */ for( int j = 0; j < Xexamples; j+=batchSize ) { int tempBatchSize = min( batchSize, Xexamples - j ); /* bulk of computation here */ calcGradient( &d_X[INDX(0,j,Xfeatures)], tempBatchSize, 
Xfeatures, d_theta1, theta1Rows, theta1Cols, d_theta2, theta2Rows, theta2Cols, &d_Y[j], &cost, d_theta1Grad, d_theta2Grad, d_tempMatrix ); floatType_t alpha = -lambda; /* update the weights with the newly calculated gradients */ checkCUBLAS( cublasSaxpy( cublasHandle, theta1Rows*theta1Cols, &alpha, d_theta1Grad, 1, d_theta1, 1 ) ); checkCUBLAS( cublasSaxpy( cublasHandle, theta2Rows*theta2Cols, &alpha, d_theta2Grad, 1, d_theta2, 1 ) ); } /* end for */ iter++; printf("|"); fflush(stdout); if( iter % 72 == 0 ) printf("\n"); } /* end while */ /* copy theta1 and theta2 back to the host to use for prediction later */ checkCUDA( cudaMemcpy( theta1, d_theta1, sizeof(floatType_t)*theta1Rows*theta1Cols, cudaMemcpyDeviceToHost ) ); checkCUDA( cudaMemcpy( theta2, d_theta2, sizeof(floatType_t)*theta2Rows*theta2Cols, cudaMemcpyDeviceToHost ) ); // printf("\nFinal cost value %.3e\n",cost); printf("\n"); checkCUDA( cudaFree( d_tempMatrix ) ); checkCUDA( cudaFree( d_X ) ); checkCUDA( cudaFree( d_Y ) ); checkCUDA( cudaFree( d_theta1 ) ); checkCUDA( cudaFree( d_theta2 ) ); checkCUDA( cudaFree( d_theta1Grad ) ); checkCUDA( cudaFree( d_theta2Grad ) ); } /* end trainNetwork */ void calcGradient( floatType_t *d_X, int const Xexamples, int const Xfeatures, floatType_t const *d_theta1, int const theta1Rows, int const theta1Cols, floatType_t const *d_theta2, int const theta2Rows, int const theta2Cols, floatType_t const *d_Y, floatType_t *cost, floatType_t *d_theta1Grad, floatType_t *d_theta2Grad, floatType_t *d_tempMatrix ) { floatType_t *d_z2, *d_a2, *d_a3, *d_delta3, *d_delta2; /* take tempMatrix and partition it up for use */ d_z2 = d_tempMatrix; d_a2 = &d_z2[Xexamples*(theta1Rows+1)]; d_a3 = &d_a2[Xexamples*(theta1Rows+1)]; d_delta2 = &d_a3[Xexamples*(theta2Rows+1)]; d_delta3 = &d_delta2[Xexamples*(theta1Rows+1)]; float alpha = 1.0; float beta = 0.0; if( sizeof( floatType_t ) == 4 ) { /* calculate X * theta1 to give z2 */ checkCUBLAS( cublasSgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_T, 
Xexamples, theta1Rows, theta1Cols, &alpha, d_X, Xfeatures, d_theta1, theta1Rows, &beta, &d_z2[INDX(0,1,Xexamples)], Xexamples ) ); /* copy z2 into a2 */ checkCUDA( cudaMemcpy( d_a2, d_z2, sizeof(floatType_t) * Xexamples * (theta1Rows+1), cudaMemcpyDeviceToDevice ) ); /* calculate a2 = sigmoid(z2), the activation */ dim3 threads1(256,1,1); dim3 blocks1( Xexamples*(theta1Rows+1)/threads1.x + 1, 1, 1); k_sigmoid_f<<< blocks1, threads1 >>>( d_a2, Xexamples*(theta1Rows+1) ); checkKERNEL() } /* end if */ else { } /* end else */ /* add a 1.0 to the beginning of each a2 vector for bias term */ initOne<<< Xexamples/256 + 1, 256 >>>( Xexamples, d_a2 ); checkKERNEL() if( sizeof( floatType_t ) == 4 ) { /* calculated z3 = a2 * theta2. put in a3 array space since we don't need z3 for anything else */ checkCUBLAS( cublasSgemm( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, Xexamples, theta2Rows, theta2Cols, &alpha, d_a2, Xexamples, d_theta2, theta2Rows, &beta, d_a3, Xexamples ) ); /* calculate a3 = sigmoid(z3), the activation */ dim3 threads1(256,1,1); dim3 blocks1( Xexamples*(theta2Rows+1)/threads1.x + 1, 1, 1); k_sigmoid_f<<< blocks1, threads1 >>>( d_a3, Xexamples*(theta2Rows+1) ); checkKERNEL() } /* end if */ else { } /* end else */ /* enable the following code if you wish to calculate the forward cost not strictly necessary to generate the gradients WARNING THIS IS BROKEN RIGHT NOW ON THE GPU!!! 
*/ #if 0 floatType_t jTemp = 0.0; for( int row = 0; row < Xexamples; row++ ) { memset( yTemp, 0, sizeof(floatType_t) * 11 ); yTemp[ (int)Y[row] ] = (floatType_t) 1.0; for( int j = 1; j <= theta2Rows; j++ ) { jTemp += -log( a3[INDX(row,j-1,Xexamples)] ) * yTemp[j] - ( log( (floatType_t) 1.0 - a3[INDX(row,j-1,Xexamples)] ) * ( (floatType_t) 1.0 - yTemp[j] ) ) ; } /* end for */ } /* end for */ jTemp /= (floatType_t) Xexamples; *cost = jTemp; #endif checkCUDA( cudaMemset( d_delta3, 0, sizeof(floatType_t)*11*Xexamples ) ); /* set delta3 to be the difference between a3 and y, the calculated versus the actual values */ setDelta3Vec<<< Xexamples/256+1, 256 >>>( d_delta3, d_Y, d_a3, Xexamples ); checkKERNEL() if( sizeof( floatType_t ) == 4 ) { /* calculated delta2 = theta2 * delta3 */ checkCUBLAS( cublasSgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, theta2Cols, Xexamples, theta2Rows, &alpha, (float *)d_theta2, theta2Rows, (float *)&d_delta3[1], 11, &beta, (float *)d_delta2, theta1Rows+1 ) ); /* calculate the sigmoid gradient of z2 */ dim3 threads(256,1,1); dim3 blocks(Xexamples*(theta1Rows+1)+1/threads.x,1,1); k_sigmoidGradient_f<<< blocks, threads >>>( d_z2, Xexamples*(theta1Rows+1) ); checkKERNEL() /* update delta2 with the sigmoid gradient of z2 */ dim3 t1(32,32,1); dim3 b1((theta1Rows+1)/t1.x + 1, Xexamples/t1.y + 1, 1 ); k_updateDelta2<<< b1, t1 >>>( d_delta2, d_z2, Xexamples, theta1Rows+1 ); checkKERNEL() floatType_t recip = (floatType_t) 1.0 / (floatType_t) Xexamples; /* calculate theta1Grad = delta2 * X */ checkCUBLAS( cublasSgemm( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, theta1Rows, theta1Cols, Xexamples, &recip, (float *)&d_delta2[1], theta1Rows+1, (float *)d_X, Xfeatures, &beta, (float *)d_theta1Grad, theta1Rows ) ); /* calculate theta2Grad = delta3 * a2 */ checkCUBLAS( cublasSgemm( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, theta2Rows, theta2Cols, Xexamples, &recip, (float *)&d_delta3[1], 11, (float *)d_a2, Xexamples, &beta, (float *)d_theta2Grad, theta2Rows ) ); } 
/* end if */ else { } /* end else */ } /* end calcGradient */ void predict(floatType_t *X, int const Xexamples, int const Xfeatures, floatType_t const *theta1, int const theta1Rows, int const theta1Cols, floatType_t const *theta2, int const theta2Rows, int const theta2Cols, int *predictVector) { floatType_t *tempMatrix, *z2, *a2, *a3; /* add the bias term to the X training set data */ for( int i = 0; i < Xexamples; i++ ) X[INDX(0,i,Xfeatures)] = (floatType_t) 1.0; /* malloc the temp space */ tempMatrix = (floatType_t *) malloc( sizeof(floatType_t) * ( Xexamples * (theta1Rows+1) + Xexamples * (theta1Rows+1) + Xexamples * (theta2Rows+1) ) ); /* carve up the temp space */ z2 = tempMatrix; a2 = &z2[INDX(Xexamples,theta1Rows,Xexamples)]; a3 = &a2[INDX(Xexamples,theta1Rows+1,Xexamples)]; if( sizeof( floatType_t ) == 4 ) { /* calculate z2 */ cblas_sgemm( CblasColMajor, CblasTrans, CblasTrans, Xexamples, theta1Rows, theta1Cols, 1.0f, (float *) X, Xfeatures, (float *) theta1, theta1Rows, 0.0f, (float *) &z2[INDX(0,1,Xexamples)], Xexamples ); /* calculate a2 */ for( int j = 1; j < theta1Rows+1; j++ ) for( int i = 0; i < Xexamples; i++ ) a2[INDX(i,j,Xexamples)] = sigmoid_f( z2[INDX(i,j,Xexamples)] ); } /* end if */ else { } /* end else */ /* add the bias term to a2 */ for( int i = 0; i < Xexamples; i++ ) a2[INDX(i,0,Xexamples)] = (floatType_t) 1.0; if( sizeof( floatType_t ) == 4 ) { /* calculate z3 */ cblas_sgemm( CblasColMajor, CblasNoTrans, CblasTrans, Xexamples, theta2Rows, theta2Cols, 1.0f, (float *) a2, Xexamples, (float *) theta2, theta2Rows, 0.0f, (float *) a3, Xexamples ); /* calculate a3 */ for( int j = 0; j < theta2Rows; j++ ) for( int i = 0; i < Xexamples; i++ ) a3[INDX(i,j,Xexamples)] = sigmoid_f( a3[INDX(i,j,Xexamples)] ); } /* end if */ else { } /* end else */ /* use a3 to populate the prediction vector. 
each element will be a digit between one and ten, which is the predicted value of the image */ for( int row = 0; row < Xexamples; row++ ) { floatType_t max = -99.0; int idx = -10; for( int i = 0; i < 10; i++ ) { if( a3[INDX(row,i,Xexamples)] > max ) { max = a3[INDX(row,i,Xexamples)]; idx = i+1; } /* end if */ } /* end for i */ predictVector[row] = idx; } /* end row */ free(tempMatrix); } /* end predict */ void readCommandLineArgs( int argc, char *argv[], float *learningRate, int *batchSize, int *iterations, int *sizeHiddenLayer ) { /* read command line input */ switch( argc ) { case 1: *learningRate = 0.3; *batchSize = 50; *iterations = 1; *sizeHiddenLayer = 25; break; case 2: if( strcmp( argv[1],"-h" ) == 0 ) { printf("Usage: ./x.nn -h for this message\n"); printf("Usage: ./x.nn <learningRate:float> <batchSize:int> <iterations:int> <hiddenLayerSize:int>\n"); exit(911); } /* end for */ break; case 5: *learningRate = atof( argv[1] ); if( *learningRate == 0.0f ) { printf("Invalid learning rate %s\n", argv[1] ); *learningRate = 0.3; printf("Defaulting to %e\n", *learningRate ); } /* end if */ *batchSize = atoi( argv[2] ); if( *batchSize <= 0 ) { printf("Invalid batchSize %s\n", argv[2] ); *batchSize = 50; printf("Defaulting to %d\n",*batchSize ); } /* end if */ *iterations = atoi( argv[3] ); if( *iterations <= 0 ) { printf("Invalid iteration size %s\n", argv[3] ); *iterations = 1; printf("Defaulting to %d\n",*iterations); } /* end if */ *sizeHiddenLayer = atoi( argv[4] ); if( *sizeHiddenLayer <= 0 ) { printf("Invalid hidden layer size %s\n", argv[4] ); *sizeHiddenLayer = 25; printf("Defaulting to %d\n",*sizeHiddenLayer ); } /* end if */ break; default: printf("Undefined command-line args\n"); printf("Usage: ./x.nn -h for this message\n"); printf("Usage: ./x.nn <learningRate:float> <batchSize:int> <iterations:int> <hiddenLayerSize:int>\n"); exit(911); break; } /* end switch */ /* print some initial stuff */ printf("Learning rate lambda is %.3e\n",*learningRate); 
printf("Batchsize is %d\n",*batchSize); printf("Number of iterations is %d\n",*iterations); printf("Hidden Layer Size is %d\n",*sizeHiddenLayer); } /* end readCommandLineArgs */ void readMatrixFromFile( char *fileName, float *matrix, int const rows, int const cols, int const ld ) { FILE *ifp; ifp = fopen( fileName, "r" ); if( ifp == NULL ) { fprintf(stderr, "Error opening file %s\n", fileName); exit(911); } /* end if */ for( int col = 0; col < cols; col++ ) { for( int row = 0; row < rows; row++ ) { if( !fscanf( ifp, "%f", &matrix[ INDX( row, col, ld ) ] ) ) { fprintf(stderr,"error reading training matrix file \n"); exit(911); } /* end if */ } /* end for row */ } /* end for col */ fclose(ifp); return; } /* end readMatrixFromFile */
fd59fcc89d86d2dbdc114f357bc2c1a4f10d9084.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "gputimer.h"

// size of matrix is MxN (stored row-major as a flat array of N*M floats)
const int N = 64; // number of rows
const int M = 32; // number of columns

// Fill mat[0 .. rowSize*columnSize-1] with its own flat index (0, 1, 2, ...).
void fill_matrix(float *mat, int rowSize, int columnSize)
{
	for (int j = 0; j < rowSize * columnSize; j++)
		mat[j] = (float) j;
}

// Debug helper: print a numRows x numColumns row-major matrix to stdout.
void print_Matrix(float *mat, int numRows, int numColumns)
{
	for (int i = 0; i < numRows; i++) {
		for (int j = 0; j < numColumns; j++) {
			printf("%4.4g ", mat[j + i*numColumns]);
		}
		printf("\n");
	}
}

// Element-wise exact comparison of the GPU result against the CPU reference.
// Returns 0 when every element matches, 1 otherwise.
int compare_matrices(float *gpu, float *ref)
{
	int result = 0;
	for (int j = 0; j < M; j++)
		for (int i = 0; i < N; i++)
			if (ref[i + j*N] != gpu[i + j*N]) {
				// printf("reference(%d,%d) = %f but test(%d,%d) = %f\n",
				//        i,j,ref[i+j*N],i,j,test[i+j*N]);
				result = 1;
			}
	return result;
}

// CPU reference: out[k] = in[k] / 2 for every element.
void exp_CPU(float *in, int numRows, int numColumns, float *out)
{
	for (int i = 0; i < numRows; i++) {
		for (int j = 0; j < numColumns; j++) {
			out[i*numColumns + j] = in[i*numColumns + j] / 2;
		}
	}
}

// One thread per element: out[i] = in[i] / 2.
// No bounds check is performed, so the launch must cover exactly N*M threads.
__global__ void parallel_exp(float *in, float *out)
{
	int myIdx = blockDim.x * blockIdx.x + threadIdx.x;
	out[myIdx] = in[myIdx] / 2;
}

int main(int argc, char **argv)
{
	int numbytes = N * M * sizeof(float);

	float *in   = (float *) malloc(numbytes);
	float *out  = (float *) malloc(numbytes);
	float *gold = (float *) malloc(numbytes);

	fill_matrix(in, N, M);
	exp_CPU(in, N, M, gold); // CPU reference result
	//print_Matrix(in,N,M);

	float *d_in, *d_out;
	hipMalloc(&d_in, numbytes);
	hipMalloc(&d_out, numbytes);
	hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);

	GpuTimer timer;
	// 64 blocks x 32 threads = 2048 threads = N*M, exactly one per element.
	dim3 blocks(64);
	dim3 threads(32);

	timer.Start();
	hipLaunchKernelGGL(( parallel_exp), dim3(blocks), dim3(threads), 0, 0, d_in, d_out);
	// BUGFIX: the stop event was never recorded, so Elapsed() reported an
	// undefined interval. Record it immediately after the launch.
	timer.Stop();

	hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);

	// BUGFIX: the old message ("transpose_serial"/"Verifying transpose") was a
	// leftover from a transpose template; this program halves each element.
	printf("parallel_exp: %g ms.\nVerifying result...%s\n",
	       timer.Elapsed(),
	       compare_matrices(out, gold) ? "Failed" : "Success");
	//print_Matrix(gold,N,M);

	// BUGFIX: release device and host buffers (previously leaked).
	hipFree(d_in);
	hipFree(d_out);
	free(in);
	free(out);
	free(gold);
	return 0;
}
fd59fcc89d86d2dbdc114f357bc2c1a4f10d9084.cu
#include <stdio.h>
#include <stdlib.h>
#include "gputimer.h"

// size of matrix is MxN (stored row-major as a flat array of N*M floats)
const int N = 64; // number of rows
const int M = 32; // number of columns

// Fill mat[0 .. rowSize*columnSize-1] with its own flat index (0, 1, 2, ...).
void fill_matrix(float *mat, int rowSize, int columnSize)
{
	for (int j = 0; j < rowSize * columnSize; j++)
		mat[j] = (float) j;
}

// Debug helper: print a numRows x numColumns row-major matrix to stdout.
void print_Matrix(float *mat, int numRows, int numColumns)
{
	for (int i = 0; i < numRows; i++) {
		for (int j = 0; j < numColumns; j++) {
			printf("%4.4g ", mat[j + i*numColumns]);
		}
		printf("\n");
	}
}

// Element-wise exact comparison of the GPU result against the CPU reference.
// Returns 0 when every element matches, 1 otherwise.
int compare_matrices(float *gpu, float *ref)
{
	int result = 0;
	for (int j = 0; j < M; j++)
		for (int i = 0; i < N; i++)
			if (ref[i + j*N] != gpu[i + j*N]) {
				// printf("reference(%d,%d) = %f but test(%d,%d) = %f\n",
				//        i,j,ref[i+j*N],i,j,test[i+j*N]);
				result = 1;
			}
	return result;
}

// CPU reference: out[k] = in[k] / 2 for every element.
void exp_CPU(float *in, int numRows, int numColumns, float *out)
{
	for (int i = 0; i < numRows; i++) {
		for (int j = 0; j < numColumns; j++) {
			out[i*numColumns + j] = in[i*numColumns + j] / 2;
		}
	}
}

// One thread per element: out[i] = in[i] / 2.
// No bounds check is performed, so the launch must cover exactly N*M threads.
__global__ void parallel_exp(float *in, float *out)
{
	int myIdx = blockDim.x * blockIdx.x + threadIdx.x;
	out[myIdx] = in[myIdx] / 2;
}

int main(int argc, char **argv)
{
	int numbytes = N * M * sizeof(float);

	float *in   = (float *) malloc(numbytes);
	float *out  = (float *) malloc(numbytes);
	float *gold = (float *) malloc(numbytes);

	fill_matrix(in, N, M);
	exp_CPU(in, N, M, gold); // CPU reference result
	//print_Matrix(in,N,M);

	float *d_in, *d_out;
	cudaMalloc(&d_in, numbytes);
	cudaMalloc(&d_out, numbytes);
	cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);

	GpuTimer timer;
	// 64 blocks x 32 threads = 2048 threads = N*M, exactly one per element.
	dim3 blocks(64);
	dim3 threads(32);

	timer.Start();
	parallel_exp<<<blocks, threads>>>(d_in, d_out);
	// BUGFIX: the stop event was never recorded, so Elapsed() reported an
	// undefined interval. Record it immediately after the launch.
	timer.Stop();

	cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);

	// BUGFIX: the old message ("transpose_serial"/"Verifying transpose") was a
	// leftover from a transpose template; this program halves each element.
	printf("parallel_exp: %g ms.\nVerifying result...%s\n",
	       timer.Elapsed(),
	       compare_matrices(out, gold) ? "Failed" : "Success");
	//print_Matrix(gold,N,M);

	// BUGFIX: release device and host buffers (previously leaked).
	cudaFree(d_in);
	cudaFree(d_out);
	free(in);
	free(out);
	free(gold);
	return 0;
}
178bea257b1133bc01dad8de2073fbe76caa7587.hip
// !!! This is a file automatically generated by hipify!!!
// CPython extension module "_sigma31_layers": entry points for computing
// sigma31/sigma11 statistics and filter derivatives of a 4-layer network,
// with per-GPU buffer caches shared across the included implementation files.
#include "Python.h"
#include "arrayobject.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>

// Index type for (potentially very large) flattened index buffers.
#define IND_DTYPE unsigned long long

// Flattened-index helpers for layer outputs (O*), sigma tensors (S*),
// filters (F*), and images (I_IND). Each relies on stride variables
// (e.g. max_output_sz3_max_output_sz3_n3) being in scope at the use site.
#define O3_IND(A,B,C,D)((D) + (C)*max_output_sz3 + (B)*max_output_sz3_max_output_sz3 + (A)*max_output_sz3_max_output_sz3_n3)
#define O2_IND(A,B,C,D)((D) + (C)*max_output_sz2 + (B)*max_output_sz2_max_output_sz2 + (A)*max_output_sz2_max_output_sz2_n2)
#define O1_IND(A,B,C,D)((D) + (C)*max_output_sz1 + (B)*max_output_sz1_max_output_sz1 + (A)*max_output_sz1_max_output_sz1_n1)
#define S1_IND(A,B,C,D,E)((E) + (D)*s1 + (C)*s1_s1 + (B)*s1_s1_n1 + (A)*s1_s1_n1_3)
#define S2_IND(A,B,C,D,E)((E) + (D)*s2 + (C)*s2_s2 + (B)*s2_s2_n1 + (A)*s2_s2_n1_n2)
#define S3_IND(A,B,C,D,E)((E) + (D)*s3 + (C)*s3_s3 + (B)*s3_s3_n2 + (A)*s3_s3_n2_n3)
#define SL_IND(A,B,C,D)((D) + (C)*max_output_sz3 + (B)*max_output_sz3_max_output_sz3 + (A)*max_output_sz3_max_output_sz3_n3)
#define F1_IND(A,B,C,D)(D + (s1)*C + (s1*s1)*B + (s1*s1*n0)*A)
#define F2_IND(A,B,C,D)(D + (s2)*C + (s2*s2)*B + (s2*s2*n1)*A)
#define F3_IND(A,B,C,D)(D + (s3)*C + (s3*s3)*B + (s3*s3*n2)*A)
#define FL_IND(A,B,C,D)(D + (max_output_sz3)*C + (max_output_sz3*max_output_sz3)*B + (max_output_sz3*max_output_sz3*n3)*A)
#define I_IND(A,B,C,D)((D) + (C)*img_sz + (B)*img_sz_img_sz + (A)*img_sz_img_sz_3)

// Error check used inside the Python-callable functions below; expects a
// local `hipError_t err` and returns NULL (Python exception path) on failure.
#define CHECK_CUDA_ERR {err = hipGetLastError();if(err != hipSuccess){\
printf("CUDA error: %s, %s, %i\n",hipGetErrorString(err),__FILE__,__LINE__);return NULL;}}

#define DATA_TYPE_SZ sizeof(float)

// Fixed capacities of the per-GPU caches below.
#define N_OUTPUTS 10
#define N_SIGMAS 10
#define N_LAYERS 4
#define N_GPUS 4

// Per-GPU cached result buffers and layer bookkeeping (device pointers are
// stored as plain floats*; 0 means "not allocated yet").
float * sum_res_c[N_GPUS][N_OUTPUTS];
int deriv_layer_ind_res[N_GPUS][N_OUTPUTS];
// Current network dimensions (n0 = 3 input channels; the rest are set later
// by the set_*_buffer entry points).
int N_C, n1, n0=3, s1, n2, s2, n3, s3, max_output_sz3;
// GPU pointers, one for each GPU
float *F1s_c[N_GPUS], *F2s_c[N_GPUS], *F3s_c[N_GPUS], *FLs_c[N_GPUS];
float *sigma31s_c[N_GPUS][N_SIGMAS]; // second dimension is the layer, generally not all GPUs will have all sigmas.
float *sigma11s_c[N_GPUS];
IND_DTYPE sigma11_len[N_GPUS], *inds_c[N_GPUS], *offsets_c[N_GPUS];
IND_DTYPE n_inds[N_GPUS], n_inds_FL321[N_GPUS];
int N_Cs[N_GPUS];
float *FL321s_c[N_GPUS];
float *F_sum_c[N_GPUS][5], *F_partial_c[N_GPUS][5];
int dims_F_sum[N_GPUS][5][4];
// Per-GPU, per-sigma copies of the network dimensions ...
int n1s[N_GPUS][N_SIGMAS], n0s[N_GPUS][N_SIGMAS], s1s[N_GPUS][N_SIGMAS], n2s[N_GPUS][N_SIGMAS], s2s[N_GPUS][N_SIGMAS], n3s[N_GPUS][N_SIGMAS], s3s[N_GPUS][N_SIGMAS], max_output_sz3s[N_GPUS][N_SIGMAS];
// ... and the pre-computed stride products used by the *_IND macros.
int max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0_n1s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3s[N_GPUS][N_SIGMAS], z2b[N_GPUS][N_SIGMAS];

// Implementation files; textual includes so they share the globals above.
#include "set_sigma_buffer.c"
#include "set_sigma11_buffer.c"
#include "set_FL321_buffer.c"
#include "einsum_deriv_gpu.cu"
#include "einsum_return.cu"
#include "set_filter_buffers.cu"
#include "compute_sigma31_full_gpu.cu"
#include "compute_patch_inds.cu"
#include "compute_patch_inds_addresses.cu"
#include "compute_sigma11_gpu.cu"
#include "compute_sigma11.cu"
#include "max_pool_locs.cu"
#include "compute_F_prod_inds.cu"
#include "compute_F_layer_sum_inds.cu"
#include "compute_F_layer_sum_deriv_inds.cu"
#include "compute_F_layer_sum_deriv_inds_gpu.cu"
#include "compute_F_layer_sum_deriv_inds_gpu_return.cu"
#include "compute_sigma11_lin_gpu.cu"
#include "pred_deriv_gpu.cu"
#include "set_img_from_patches.cu"
#include "bp_patch_sigma31.cu"
#include "bp_patch_sigma31_sup.cu"
#include "bp_patch_sigma31_uns.cu"

// Python method table: maps the exported names to the functions defined in
// the includes above (all take METH_VARARGS-style argument tuples).
static PyMethodDef _sigma31_layers[] = {
	{"compute_sigma31_full_gpu", compute_sigma31_full_gpu, METH_VARARGS},
	{"compute_patch_inds", compute_patch_inds, METH_VARARGS},
	{"compute_patch_inds_addresses", compute_patch_inds_addresses, METH_VARARGS},
	{"compute_F_prod_inds", compute_F_prod_inds, METH_VARARGS},
	{"compute_F_layer_sum_inds", compute_F_layer_sum_inds, METH_VARARGS},
	{"compute_F_layer_sum_deriv_inds", compute_F_layer_sum_deriv_inds, METH_VARARGS},
	{"compute_F_layer_sum_deriv_inds_gpu", compute_F_layer_sum_deriv_inds_gpu, METH_VARARGS},
	{"compute_F_layer_sum_deriv_inds_gpu_return", compute_F_layer_sum_deriv_inds_gpu_return, METH_VARARGS},
	{"compute_sigma11", compute_sigma11, METH_VARARGS},
	{"compute_sigma11_gpu", compute_sigma11_gpu, METH_VARARGS},
	{"max_pool_locs", max_pool_locs, METH_VARARGS},
	{"einsum_deriv_gpu", einsum_deriv_gpu, METH_VARARGS},
	{"set_sigma_buffer", set_sigma_buffer, METH_VARARGS},
	{"set_sigma11_buffer", set_sigma11_buffer, METH_VARARGS},
	{"set_FL321_buffer", set_FL321_buffer, METH_VARARGS},
	{"set_filter_buffers", set_filter_buffers, METH_VARARGS},
	{"einsum_return", einsum_return, METH_VARARGS},
	{"compute_sigma11_lin_gpu", compute_sigma11_lin_gpu, METH_VARARGS},
	{"pred_deriv_gpu", pred_deriv_gpu, METH_VARARGS},
	{"set_img_from_patches", set_img_from_patches, METH_VARARGS},
	{"bp_patch_sigma31", bp_patch_sigma31, METH_VARARGS},
	{"bp_patch_sigma31_sup", bp_patch_sigma31_sup, METH_VARARGS},
	{"bp_patch_sigma31_uns", bp_patch_sigma31_uns, METH_VARARGS},
	{NULL, NULL}
};

// Python 2 module init: registers the method table, initializes NumPy's
// C API, and zeroes every cached pointer/counter so the set_*_buffer entry
// points can detect "not yet allocated".
extern "C" void init_sigma31_layers(){
	(void) Py_InitModule("_sigma31_layers", _sigma31_layers);
	import_array();
	for(int gpu = 0; gpu < N_GPUS; gpu++){
		for(int l = 0; l < N_SIGMAS; l++){
			sigma31s_c[gpu][l] = 0;
		}
		for(int layer = 0; layer < N_OUTPUTS; layer++){
			sum_res_c[gpu][layer] = 0;
		}
		F1s_c[gpu] = 0;
		F2s_c[gpu] = 0;
		F3s_c[gpu] = 0;
		FLs_c[gpu] = 0;
		sigma11s_c[gpu] = 0;
		FL321s_c[gpu] = 0;
		N_Cs[gpu] = 0;
		n_inds[gpu] = 0;
		n_inds_FL321[gpu] = 0;
		for(int layer = 0; layer < 5; layer++){
			F_sum_c[gpu][layer] = 0;
			F_partial_c[gpu][layer] = 0;
		}
	}
	return;
}
178bea257b1133bc01dad8de2073fbe76caa7587.cu
// CPython extension module "_sigma31_layers": entry points for computing
// sigma31/sigma11 statistics and filter derivatives of a 4-layer network,
// with per-GPU buffer caches shared across the included implementation files.
#include "Python.h"
#include "arrayobject.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime.h>

// Index type for (potentially very large) flattened index buffers.
#define IND_DTYPE unsigned long long

// Flattened-index helpers for layer outputs (O*), sigma tensors (S*),
// filters (F*), and images (I_IND). Each relies on stride variables
// (e.g. max_output_sz3_max_output_sz3_n3) being in scope at the use site.
#define O3_IND(A,B,C,D)((D) + (C)*max_output_sz3 + (B)*max_output_sz3_max_output_sz3 + (A)*max_output_sz3_max_output_sz3_n3)
#define O2_IND(A,B,C,D)((D) + (C)*max_output_sz2 + (B)*max_output_sz2_max_output_sz2 + (A)*max_output_sz2_max_output_sz2_n2)
#define O1_IND(A,B,C,D)((D) + (C)*max_output_sz1 + (B)*max_output_sz1_max_output_sz1 + (A)*max_output_sz1_max_output_sz1_n1)
#define S1_IND(A,B,C,D,E)((E) + (D)*s1 + (C)*s1_s1 + (B)*s1_s1_n1 + (A)*s1_s1_n1_3)
#define S2_IND(A,B,C,D,E)((E) + (D)*s2 + (C)*s2_s2 + (B)*s2_s2_n1 + (A)*s2_s2_n1_n2)
#define S3_IND(A,B,C,D,E)((E) + (D)*s3 + (C)*s3_s3 + (B)*s3_s3_n2 + (A)*s3_s3_n2_n3)
#define SL_IND(A,B,C,D)((D) + (C)*max_output_sz3 + (B)*max_output_sz3_max_output_sz3 + (A)*max_output_sz3_max_output_sz3_n3)
#define F1_IND(A,B,C,D)(D + (s1)*C + (s1*s1)*B + (s1*s1*n0)*A)
#define F2_IND(A,B,C,D)(D + (s2)*C + (s2*s2)*B + (s2*s2*n1)*A)
#define F3_IND(A,B,C,D)(D + (s3)*C + (s3*s3)*B + (s3*s3*n2)*A)
#define FL_IND(A,B,C,D)(D + (max_output_sz3)*C + (max_output_sz3*max_output_sz3)*B + (max_output_sz3*max_output_sz3*n3)*A)
#define I_IND(A,B,C,D)((D) + (C)*img_sz + (B)*img_sz_img_sz + (A)*img_sz_img_sz_3)

// Error check used inside the Python-callable functions below; expects a
// local `cudaError_t err` and returns NULL (Python exception path) on failure.
#define CHECK_CUDA_ERR {err = cudaGetLastError();if(err != cudaSuccess){\
printf("CUDA error: %s, %s, %i\n",cudaGetErrorString(err),__FILE__,__LINE__);return NULL;}}

#define DATA_TYPE_SZ sizeof(float)

// Fixed capacities of the per-GPU caches below.
#define N_OUTPUTS 10
#define N_SIGMAS 10
#define N_LAYERS 4
#define N_GPUS 4

// Per-GPU cached result buffers and layer bookkeeping (device pointers are
// stored as plain float*; 0 means "not allocated yet").
float * sum_res_c[N_GPUS][N_OUTPUTS];
int deriv_layer_ind_res[N_GPUS][N_OUTPUTS];
// Current network dimensions (n0 = 3 input channels; the rest are set later
// by the set_*_buffer entry points).
int N_C, n1, n0=3, s1, n2, s2, n3, s3, max_output_sz3;
// GPU pointers, one for each GPU
float *F1s_c[N_GPUS], *F2s_c[N_GPUS], *F3s_c[N_GPUS], *FLs_c[N_GPUS];
float *sigma31s_c[N_GPUS][N_SIGMAS]; // second dimension is the layer, generally not all GPUs will have all sigmas.
float *sigma11s_c[N_GPUS];
IND_DTYPE sigma11_len[N_GPUS], *inds_c[N_GPUS], *offsets_c[N_GPUS];
IND_DTYPE n_inds[N_GPUS], n_inds_FL321[N_GPUS];
int N_Cs[N_GPUS];
float *FL321s_c[N_GPUS];
float *F_sum_c[N_GPUS][5], *F_partial_c[N_GPUS][5];
int dims_F_sum[N_GPUS][5][4];
// Per-GPU, per-sigma copies of the network dimensions ...
int n1s[N_GPUS][N_SIGMAS], n0s[N_GPUS][N_SIGMAS], s1s[N_GPUS][N_SIGMAS], n2s[N_GPUS][N_SIGMAS], s2s[N_GPUS][N_SIGMAS], n3s[N_GPUS][N_SIGMAS], s3s[N_GPUS][N_SIGMAS], max_output_sz3s[N_GPUS][N_SIGMAS];
// ... and the pre-computed stride products used by the *_IND macros.
int max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0_n1s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3_s2s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3_n3s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3_s3s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3_s3s[N_GPUS][N_SIGMAS], max_output_sz3_max_output_sz3s[N_GPUS][N_SIGMAS], z2b[N_GPUS][N_SIGMAS];

// Implementation files; textual includes so they share the globals above.
#include "set_sigma_buffer.c"
#include "set_sigma11_buffer.c"
#include "set_FL321_buffer.c"
#include "einsum_deriv_gpu.cu"
#include "einsum_return.cu"
#include "set_filter_buffers.cu"
#include "compute_sigma31_full_gpu.cu"
#include "compute_patch_inds.cu"
#include "compute_patch_inds_addresses.cu"
#include "compute_sigma11_gpu.cu"
#include "compute_sigma11.cu"
#include "max_pool_locs.cu"
#include "compute_F_prod_inds.cu"
#include "compute_F_layer_sum_inds.cu"
#include "compute_F_layer_sum_deriv_inds.cu"
#include "compute_F_layer_sum_deriv_inds_gpu.cu"
#include "compute_F_layer_sum_deriv_inds_gpu_return.cu"
#include "compute_sigma11_lin_gpu.cu"
#include "pred_deriv_gpu.cu"
#include "set_img_from_patches.cu"
#include "bp_patch_sigma31.cu"
#include "bp_patch_sigma31_sup.cu"
#include "bp_patch_sigma31_uns.cu"

// Python method table: maps the exported names to the functions defined in
// the includes above (all take METH_VARARGS-style argument tuples).
static PyMethodDef _sigma31_layers[] = {
	{"compute_sigma31_full_gpu", compute_sigma31_full_gpu, METH_VARARGS},
	{"compute_patch_inds", compute_patch_inds, METH_VARARGS},
	{"compute_patch_inds_addresses", compute_patch_inds_addresses, METH_VARARGS},
	{"compute_F_prod_inds", compute_F_prod_inds, METH_VARARGS},
	{"compute_F_layer_sum_inds", compute_F_layer_sum_inds, METH_VARARGS},
	{"compute_F_layer_sum_deriv_inds", compute_F_layer_sum_deriv_inds, METH_VARARGS},
	{"compute_F_layer_sum_deriv_inds_gpu", compute_F_layer_sum_deriv_inds_gpu, METH_VARARGS},
	{"compute_F_layer_sum_deriv_inds_gpu_return", compute_F_layer_sum_deriv_inds_gpu_return, METH_VARARGS},
	{"compute_sigma11", compute_sigma11, METH_VARARGS},
	{"compute_sigma11_gpu", compute_sigma11_gpu, METH_VARARGS},
	{"max_pool_locs", max_pool_locs, METH_VARARGS},
	{"einsum_deriv_gpu", einsum_deriv_gpu, METH_VARARGS},
	{"set_sigma_buffer", set_sigma_buffer, METH_VARARGS},
	{"set_sigma11_buffer", set_sigma11_buffer, METH_VARARGS},
	{"set_FL321_buffer", set_FL321_buffer, METH_VARARGS},
	{"set_filter_buffers", set_filter_buffers, METH_VARARGS},
	{"einsum_return", einsum_return, METH_VARARGS},
	{"compute_sigma11_lin_gpu", compute_sigma11_lin_gpu, METH_VARARGS},
	{"pred_deriv_gpu", pred_deriv_gpu, METH_VARARGS},
	{"set_img_from_patches", set_img_from_patches, METH_VARARGS},
	{"bp_patch_sigma31", bp_patch_sigma31, METH_VARARGS},
	{"bp_patch_sigma31_sup", bp_patch_sigma31_sup, METH_VARARGS},
	{"bp_patch_sigma31_uns", bp_patch_sigma31_uns, METH_VARARGS},
	{NULL, NULL}
};

// Python 2 module init: registers the method table, initializes NumPy's
// C API, and zeroes every cached pointer/counter so the set_*_buffer entry
// points can detect "not yet allocated".
extern "C" void init_sigma31_layers(){
	(void) Py_InitModule("_sigma31_layers", _sigma31_layers);
	import_array();
	for(int gpu = 0; gpu < N_GPUS; gpu++){
		for(int l = 0; l < N_SIGMAS; l++){
			sigma31s_c[gpu][l] = 0;
		}
		for(int layer = 0; layer < N_OUTPUTS; layer++){
			sum_res_c[gpu][layer] = 0;
		}
		F1s_c[gpu] = 0;
		F2s_c[gpu] = 0;
		F3s_c[gpu] = 0;
		FLs_c[gpu] = 0;
		sigma11s_c[gpu] = 0;
		FL321s_c[gpu] = 0;
		N_Cs[gpu] = 0;
		n_inds[gpu] = 0;
		n_inds_FL321[gpu] = 0;
		for(int layer = 0; layer < 5; layer++){
			F_sum_c[gpu][layer] = 0;
			F_partial_c[gpu][layer] = 0;
		}
	}
	return;
}
3d7d50be5fa733bca579b406801105def15dae12.hip
// !!! This is a file automatically generated by hipify!!!
// Auto-generated launch-configuration benchmark: sweeps block shapes and
// matrix sizes, timing 1000 launches of kernel_Phi4_Phi6 for each combination
// and printing one "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" line per config.
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_Phi4_Phi6.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

// Candidate (BLOCKX, BLOCKY) thread-block shapes to sweep.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate (XSIZE, YSIZE) problem sizes; argv[1] selects how many are used.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
	hipSetDevice(0);
	// argv[1] = number of entries of matrices_ to benchmark (no argc check).
	char* p;int matrix_len=strtol(argv[1], &p, 10);
	for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
		for(int block_looper=0;block_looper<20;block_looper++){
			int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
			const int N = 1;
			double *t = NULL;
			// NOTE(review): allocation size is XSIZE*YSIZE *bytes*, not
			// XSIZE*YSIZE*sizeof(double) — looks like a generator artifact;
			// confirm kernel_Phi4_Phi6's access pattern before trusting it.
			hipMalloc(&t, XSIZE*YSIZE);
			double *q = NULL;
			hipMalloc(&q, XSIZE*YSIZE);
			const double lambda = 1;
			const double g = 1;
			// Round the grid dimensions up to multiples of the block shape.
			int iXSIZE= XSIZE;
			int iYSIZE= YSIZE;
			while(iXSIZE%BLOCKX!=0)
			{
				iXSIZE++;
			}
			while(iYSIZE%BLOCKY!=0)
			{
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			// hipFree(0) forces context creation; then one launch + sync to warm up.
			hipFree(0);
			hipLaunchKernelGGL(( kernel_Phi4_Phi6), dim3(gridBlock),dim3(threadBlock), 0, 0, N,t,q,lambda,g);
			hipDeviceSynchronize();
			// 10 extra warm-up launches before timing.
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_Phi4_Phi6), dim3(gridBlock),dim3(threadBlock), 0, 0, N,t,q,lambda,g);
			}
			auto start = steady_clock::now();
			// Timed region: 1000 kernel launches.
			// NOTE(review): there is no device synchronize before `end`, and
			// launches are asynchronous — presumably this mostly measures
			// enqueue/launch overhead plus queue back-pressure; verify intent.
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_Phi4_Phi6), dim3(gridBlock),dim3(threadBlock), 0, 0, N,t,q,lambda,g);
			}
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
		}
	}}
3d7d50be5fa733bca579b406801105def15dae12.cu
// Auto-generated launch-configuration benchmark: sweeps block shapes and
// matrix sizes, timing 1000 launches of kernel_Phi4_Phi6 for each combination
// and printing one "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" line per config.
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_Phi4_Phi6.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

// Candidate (BLOCKX, BLOCKY) thread-block shapes to sweep.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate (XSIZE, YSIZE) problem sizes; argv[1] selects how many are used.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
	cudaSetDevice(0);
	// argv[1] = number of entries of matrices_ to benchmark (no argc check).
	char* p;int matrix_len=strtol(argv[1], &p, 10);
	for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
		for(int block_looper=0;block_looper<20;block_looper++){
			int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
			const int N = 1;
			double *t = NULL;
			// NOTE(review): allocation size is XSIZE*YSIZE *bytes*, not
			// XSIZE*YSIZE*sizeof(double) — looks like a generator artifact;
			// confirm kernel_Phi4_Phi6's access pattern before trusting it.
			cudaMalloc(&t, XSIZE*YSIZE);
			double *q = NULL;
			cudaMalloc(&q, XSIZE*YSIZE);
			const double lambda = 1;
			const double g = 1;
			// Round the grid dimensions up to multiples of the block shape.
			int iXSIZE= XSIZE;
			int iYSIZE= YSIZE;
			while(iXSIZE%BLOCKX!=0)
			{
				iXSIZE++;
			}
			while(iYSIZE%BLOCKY!=0)
			{
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			// cudaFree(0) forces context creation; then one launch + sync to warm up.
			cudaFree(0);
			kernel_Phi4_Phi6<<<gridBlock,threadBlock>>>(N,t,q,lambda,g);
			cudaDeviceSynchronize();
			// 10 extra warm-up launches before timing.
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				kernel_Phi4_Phi6<<<gridBlock,threadBlock>>>(N,t,q,lambda,g);
			}
			auto start = steady_clock::now();
			// Timed region: 1000 kernel launches.
			// NOTE(review): there is no device synchronize before `end`, and
			// launches are asynchronous — presumably this mostly measures
			// enqueue/launch overhead plus queue back-pressure; verify intent.
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				kernel_Phi4_Phi6<<<gridBlock,threadBlock>>>(N,t,q,lambda,g);
			}
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
		}
	}}
ddbb50ecbec7f8f738122f2617598bd882799dad.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AvgColour.cuh"
#include "AssignAvgColKernel.cuh"
#include "Math.cuh"
#include <thrust/device_vector.h>

namespace ced
{
	namespace gpu
	{
		// Average the colour of a selected set of pixels.
		// _red/_green/_blue: device pointers to the per-pixel colour planes.
		// _pixIds:           device pointer to the selected pixel indices.
		// _r/_g/_b:          host outputs receiving the averaged channels.
		// _amountOfValues:   number of selected IDs (see NOTE below).
		// _amountOfPixels:   total pixel count; sizes the scratch vectors.
		void avgColour( float* _red, float* _green, float* _blue, int* _pixIds, float& _r, float& _g, float& _b, int _amountOfValues, int _amountOfPixels)
		{
			int sizeOfIDs = _amountOfValues;
			// set new device vector
			// Zero-initialized scratch planes; assignAvgCol scatters the
			// selected pixels' colours into them.
			thrust::device_vector<float> d_nred(_amountOfPixels, 0.0f);
			thrust::device_vector<float> d_ngreen(_amountOfPixels, 0.0f);
			thrust::device_vector<float> d_nblue(_amountOfPixels, 0.0f);
			// assign the colour to avg
			float* d_nred_ptr = thrust::raw_pointer_cast(d_nred.data());
			float* d_ngreen_ptr = thrust::raw_pointer_cast(d_ngreen.data());
			float* d_nblue_ptr = thrust::raw_pointer_cast(d_nblue.data());
			// NOTE(review): a single block of sizeOfIDs*sizeOfIDs threads —
			// this silently exceeds the per-block thread limit (1024) once
			// sizeOfIDs > 32, and no launch-error check follows; verify the
			// expected range of _amountOfValues.
			hipLaunchKernelGGL(( assignAvgCol), dim3(1), dim3(sizeOfIDs*sizeOfIDs), 0, 0, _red, _green, _blue, d_nred_ptr, d_ngreen_ptr, d_nblue_ptr, _pixIds, sizeOfIDs);
			hipDeviceSynchronize();
			// reduce to get the sum to then divide
			_r = thrust::reduce(d_nred.begin(), d_nred.end());
			_g = thrust::reduce(d_ngreen.begin(), d_ngreen.end());
			_b = thrust::reduce(d_nblue.begin(), d_nblue.end());
			//// divide by the amount of pixels
			// Sum of the scattered values divided by the number of IDs.
			_r /= static_cast<float>(sizeOfIDs);
			_g /= static_cast<float>(sizeOfIDs);
			_b /= static_cast<float>(sizeOfIDs);
		}
	}
}
ddbb50ecbec7f8f738122f2617598bd882799dad.cu
#include "AvgColour.cuh"
#include "AssignAvgColKernel.cuh"
#include "Math.cuh"
#include <thrust/device_vector.h>

namespace ced
{
	namespace gpu
	{
		// Average the colour of a selected set of pixels.
		// _red/_green/_blue: device pointers to the per-pixel colour planes.
		// _pixIds:           device pointer to the selected pixel indices.
		// _r/_g/_b:          host outputs receiving the averaged channels.
		// _amountOfValues:   number of selected IDs (see NOTE below).
		// _amountOfPixels:   total pixel count; sizes the scratch vectors.
		void avgColour( float* _red, float* _green, float* _blue, int* _pixIds, float& _r, float& _g, float& _b, int _amountOfValues, int _amountOfPixels)
		{
			int sizeOfIDs = _amountOfValues;
			// set new device vector
			// Zero-initialized scratch planes; assignAvgCol scatters the
			// selected pixels' colours into them.
			thrust::device_vector<float> d_nred(_amountOfPixels, 0.0f);
			thrust::device_vector<float> d_ngreen(_amountOfPixels, 0.0f);
			thrust::device_vector<float> d_nblue(_amountOfPixels, 0.0f);
			// assign the colour to avg
			float* d_nred_ptr = thrust::raw_pointer_cast(d_nred.data());
			float* d_ngreen_ptr = thrust::raw_pointer_cast(d_ngreen.data());
			float* d_nblue_ptr = thrust::raw_pointer_cast(d_nblue.data());
			// NOTE(review): a single block of sizeOfIDs*sizeOfIDs threads —
			// this silently exceeds the per-block thread limit (1024) once
			// sizeOfIDs > 32, and no launch-error check follows; verify the
			// expected range of _amountOfValues.
			assignAvgCol<<<1, sizeOfIDs*sizeOfIDs>>>( _red, _green, _blue, d_nred_ptr, d_ngreen_ptr, d_nblue_ptr, _pixIds, sizeOfIDs);
			cudaDeviceSynchronize();
			// reduce to get the sum to then divide
			_r = thrust::reduce(d_nred.begin(), d_nred.end());
			_g = thrust::reduce(d_ngreen.begin(), d_ngreen.end());
			_b = thrust::reduce(d_nblue.begin(), d_nblue.end());
			//// divide by the amount of pixels
			// Sum of the scattered values divided by the number of IDs.
			_r /= static_cast<float>(sizeOfIDs);
			_g /= static_cast<float>(sizeOfIDs);
			_b /= static_cast<float>(sizeOfIDs);
		}
	}
}
262c45352bf3df994dd9bd972689a32a300167a2.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdio.h> #include <iomanip> #include <hip/hip_runtime.h> using namespace std; void MatrixRandBin(float *mat, int rows, int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if ((float)rand()/RAND_MAX > 0.5) { mat[i*cols+j] = 1.0f; }else { mat[i*cols+j] = -1.0f; } } } } void MatrixPrint(float *mat, int rows, int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { cout << setw(2) << mat[i*cols+j] << " "; } cout << endl; } cout << endl; } void MatrixPrintD(int *mat, int rows, int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { cout << setw(2) << mat[i*cols+j] << " "; } cout << endl; } cout << endl; } float MatrixCompare(float *a, float *b, int rows, int cols) { float err = 0; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { err += abs(a[i*cols+j]-b[i*cols+j]); } } return err; } void MatrixMul_host(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c) { for (int i = 0; i < a_rows; i++) { for (int j = 0; j < b_cols; j++) { float t = 0; for (int k = 0; k < b_rows; k++) { t += a[i*a_cols+k]*b[k*b_cols+j]; } c[i*b_cols+j] = t; } } } //horizontal __global__ void AMatrix2Bin(float *a, int *a_bin, int pitch_a, int Pitch_a_bin, int a_rows, int MaxBlocks, int BINSIZE) { int tix = threadIdx.x; int bix = blockIdx.x; int bdx = blockDim.x; int gdx = gridDim.x; int maxThreads = MaxBlocks*a_rows; for (int id = bix*bdx+tix; id < maxThreads; id += gdx*bdx) { int rid = id/MaxBlocks; int cid = id%MaxBlocks; int Integer = 0; int base = 1; for (int i = 0; i < BINSIZE; i++) { if (a[rid*pitch_a+(cid+1)*BINSIZE-1-i] == 1.f) { Integer += base; } base = base<<1; } a_bin[rid*Pitch_a_bin+cid] = Integer; } } //vetical __global__ void BMatrix2Bin(float *b, int *b_bin, int pitch_b, int Pitch_b_bin, int b_cols, int MaxBlocks, int BINSIZE) { int tix = threadIdx.x; int bix = blockIdx.x; int bdx = 
blockDim.x; int gdx = gridDim.x; int maxThreads = MaxBlocks*b_cols; for (int id = bix*bdx+tix; id < maxThreads; id += gdx*bdx) { int cid = id/MaxBlocks; int rid = id%MaxBlocks; int Integer = 0; int base = 1; for (int i=0; i < BINSIZE; i++) { if (b[((rid+1)*BINSIZE-1-i)*pitch_b+cid] == 1.f) { Integer += base; } base = base<<1; } b_bin[rid*Pitch_b_bin+cid] = Integer; } } // __device__ unsigned char __popcount_tab_copy[256];//__constant__ is slower than __device__ // __device__ int popcount (int x) { // return __popcount_tab_copy[(x >> 0) & 0xff] // + __popcount_tab_copy[(x >> 8) & 0xff] // + __popcount_tab_copy[(x >> 16) & 0xff] // + __popcount_tab_copy[(x >> 24) & 0xff]; // } __global__ void MatrixMulXnor(int *a, int *b, float *result, unsigned char *__popcount_tab, int pitch_a, int pitch_b, int pitch_result, int midBlocks, int BINSIZE, int RealMidSize) { int tiy = threadIdx.x; int tix = threadIdx.y; int bix = blockIdx.x; int biy = blockIdx.y; int gdx = gridDim.x; int gdy = gridDim.y; int RectSize = blockDim.x; int rest = BINSIZE*RectSize*midBlocks-RealMidSize; __shared__ unsigned char __popcount_tab_shared[256]; __shared__ int a_rect_shared[8][8]; __shared__ int b_rect_shared[8][8]; for (int i = tix*RectSize+tiy; i < 256; i += RectSize*RectSize) { __popcount_tab_shared[i] = __popcount_tab[i]; } __syncthreads(); int sum = 0; for (int i = 0; i < midBlocks; i++) { a_rect_shared[tix][tiy] = a[(bix*RectSize+tix)*pitch_a+i*RectSize+tiy]; b_rect_shared[tix][tiy] = b[(i*RectSize+tix)*pitch_b+biy*RectSize+tiy]; __syncthreads(); int bin = 0; bin = a_rect_shared[tix][0]^b_rect_shared[0][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][1]^b_rect_shared[1][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] 
+ __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][2]^b_rect_shared[2][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][3]^b_rect_shared[3][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][4]^b_rect_shared[4][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][5]^b_rect_shared[5][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][6]^b_rect_shared[6][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][7]^b_rect_shared[7][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][8]^b_rect_shared[8][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][9]^b_rect_shared[9][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + 
__popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][10]^b_rect_shared[10][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][11]^b_rect_shared[11][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][12]^b_rect_shared[12][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][13]^b_rect_shared[13][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][14]^b_rect_shared[14][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][15]^b_rect_shared[15][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][16]^b_rect_shared[16][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][17]^b_rect_shared[17][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + 
__popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][18]^b_rect_shared[18][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][19]^b_rect_shared[19][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][20]^b_rect_shared[20][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][21]^b_rect_shared[21][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][22]^b_rect_shared[22][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][23]^b_rect_shared[23][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][24]^b_rect_shared[24][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = 
a_rect_shared[tix][25]^b_rect_shared[25][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][26]^b_rect_shared[26][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][27]^b_rect_shared[27][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][28]^b_rect_shared[28][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][29]^b_rect_shared[29][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][30]^b_rect_shared[30][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][31]^b_rect_shared[31][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); __syncthreads(); } result[(bix*RectSize+tix)*pitch_result+biy*RectSize+tiy] = sum-rest; // num=0; // int rest=(BINSIZE*a_cols-RealMidSize); // for(int i=bix;i<a_rows;i+=gdx){ // 
for(int j=tix;j<b_cols;j+=bdx){ // // printf("i=%d ; j=%d\n",i,j); // int sum=0; // for(int k=0;k<a_cols;k++){ // int bin=(a_shared[num*a_cols+k]^b[k*pitch_b+j]); // int negnum=popcount(bin); // int posnum=BINSIZE-negnum; // //calculate ignores the rest of BINSIZE if the Matsize can't devided by BINSIZE ,it can cause err // //(10/00)'(01/00) should be 0000 but it is 0011,so 1+1 is trash in the result.and it mislead a_rows*b_cols times. // sum+=(posnum-negnum); // } // result[i*pitch_result+j]=sum-rest; // } // num++; // } } void MatrixMul_device(float *a, float *b, int a_rows, int a_cols, int b_cols, float *result) { int BINSIZE = 32;//size of bin2int, 32 means 0000 0000 0000 0000 0000 0000 0000 0000 int MaxBlocks = (a_cols-1)/BINSIZE+1; int Copysize = MaxBlocks*BINSIZE; float *a_copy;//a_rows * Copysize float *b_copy;//Copysize * b_cols size_t Pitch_a_copy, Pitch_b_copy; hipMallocPitch((void**)&a_copy, &Pitch_a_copy, sizeof(float)*Copysize, a_rows); hipMallocPitch((void**)&b_copy, &Pitch_b_copy, sizeof(float)*b_cols, Copysize); hipMemset(a_copy, 0, Pitch_a_copy*a_rows); hipMemset(b_copy, 0, Pitch_b_copy*Copysize); hipMemcpy2D(a_copy, Pitch_a_copy, a, sizeof(float)*a_cols, sizeof(float)*a_cols, a_rows, hipMemcpyDeviceToDevice); hipMemcpy2D(b_copy, Pitch_b_copy, b, sizeof(float)*b_cols, sizeof(float)*b_cols, a_cols, hipMemcpyDeviceToDevice); //check oringin // float *a_host; // float *b_host; // a_host = (float*) malloc(sizeof(float) * Copysize * a_rows); // b_host = (float*) malloc(sizeof(float) * b_cols * Copysize); // hipMemcpy2D(a_host,sizeof(float) *Copysize, a_copy,Pitch_a_copy,sizeof(float) *Copysize , a_rows,hipMemcpyDeviceToHost); // hipMemcpy2D(b_host,sizeof(float) *b_cols, b_copy,Pitch_b_copy,sizeof(float) *b_cols , Copysize,hipMemcpyDeviceToHost); // MatrixPrint(a_host,a_rows,Copysize); // MatrixPrint(b_host,Copysize,b_cols); int RectBlockSize = 4; dim3 RectBlockNum_a_bin((a_rows-1)/RectBlockSize+1, (MaxBlocks-1)/RectBlockSize+1, 1);//with block multiply 
dim3 RectBlockNum_b_bin((MaxBlocks-1)/RectBlockSize+1, (b_cols-1)/RectBlockSize+1, 1); int *a_bin; int *b_bin; size_t Pitch_a_bin, Pitch_b_bin; hipMallocPitch((void**)&a_bin , &Pitch_a_bin , sizeof(int)*RectBlockSize*RectBlockNum_a_bin.y, RectBlockSize*RectBlockNum_a_bin.x); hipMallocPitch((void**)&b_bin , &Pitch_b_bin , sizeof(int)*RectBlockSize*RectBlockNum_b_bin.y, RectBlockSize*RectBlockNum_b_bin.x); hipMemset(a_bin, 0, Pitch_a_bin*RectBlockSize*RectBlockNum_a_bin.x); hipMemset(b_bin, 0, Pitch_b_bin*RectBlockSize*RectBlockNum_b_bin.x); dim3 BS_BIN(512,1,1); dim3 GS_BIN(6,1,1); hipLaunchKernelGGL(( AMatrix2Bin), dim3(GS_BIN), dim3(BS_BIN) , 0, 0, a_copy, a_bin, Pitch_a_copy/sizeof(float), Pitch_a_bin/sizeof(int), a_rows, MaxBlocks, BINSIZE); hipLaunchKernelGGL(( BMatrix2Bin), dim3(GS_BIN), dim3(BS_BIN) , 0, 0, b_copy, b_bin, Pitch_b_copy/sizeof(float), Pitch_b_bin/sizeof(int), b_cols, MaxBlocks, BINSIZE); hipFree(a_copy); hipFree(b_copy); //check bin // int *a_host_bin; // int *b_host_bin; // a_host_bin = (int*) malloc(sizeof(int) *MaxBlocks * a_rows); // b_host_bin = (int*) malloc(sizeof(int) *b_cols * MaxBlocks); // hipMemcpy2D(a_host_bin,sizeof(int) *MaxBlocks, a_bin,Pitch_a_bin,sizeof(int) *MaxBlocks , a_rows ,hipMemcpyDeviceToHost); // hipMemcpy2D(b_host_bin,sizeof(int) *b_cols, b_bin,Pitch_b_bin,sizeof(int) *b_cols , MaxBlocks ,hipMemcpyDeviceToHost); // MatrixPrintD(a_host_bin,a_rows,MaxBlocks); // MatrixPrintD(b_host_bin,MaxBlocks,b_cols); float *result_bin;//a_rows * b_cols size_t Pitch_result_bin; hipMallocPitch((void**)&result_bin , &Pitch_result_bin , sizeof(float)*RectBlockSize*RectBlockNum_b_bin.y, RectBlockSize*RectBlockNum_a_bin.x); const unsigned char __popcount_tab[] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5, 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6, 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6, 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7, 
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6, 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7, 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7, 3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8, }; unsigned char *__popcount_tab_copy; hipMalloc((void**)&__popcount_tab_copy, sizeof(__popcount_tab)); hipMemcpy(__popcount_tab_copy, __popcount_tab, sizeof(__popcount_tab), hipMemcpyHostToDevice); hipEvent_t start_device, stop_device; float time_device; hipEventCreate(&start_device); hipEventCreate(&stop_device); hipEventRecord(start_device, 0); dim3 BS_MM(RectBlockSize, RectBlockSize, 1); dim3 GS_MM(RectBlockNum_a_bin.x, RectBlockNum_b_bin.y, 1); hipLaunchKernelGGL(( MatrixMulXnor), dim3(GS_MM), dim3(BS_MM) , 0, 0, a_bin, b_bin, result_bin, __popcount_tab_copy, Pitch_a_bin/sizeof(int), Pitch_b_bin/sizeof(int), Pitch_result_bin/sizeof(float), RectBlockNum_a_bin.y, BINSIZE, a_cols); hipEventRecord( stop_device, 0 ); hipEventSynchronize( stop_device ); hipEventElapsedTime( &time_device, start_device, stop_device ); hipEventDestroy( start_device ); hipEventDestroy( stop_device ); cout<<"gputime="<<time_device<<"ms"<<endl; hipMemcpy2D(result,sizeof(float) *b_cols, result_bin,Pitch_result_bin,sizeof(float) *b_cols , a_rows ,hipMemcpyDeviceToDevice); hipFree(a_bin); hipFree(b_bin); hipFree(result_bin); } int main(){ //simulate pytorch param int x=1000; int n=1000; int y=1000; float *a_host; float *b_host; float *result_host; a_host = (float*) malloc(sizeof(float) * x * n); b_host = (float*) malloc(sizeof(float) * n * y); result_host = (float*) malloc(sizeof(float) * x * y); srand(0); MatrixRandBin(a_host,x,n); MatrixRandBin(b_host,n,y); // cout<<MatrixCopysize<<endl; float *a_copy; float *b_copy; float *result_device; hipMalloc((void**)&a_copy,sizeof(float) *x * n); hipMalloc((void**)&b_copy,sizeof(float) *n * y); hipMalloc((void**)&result_device,sizeof(float) *x * y); hipMemcpy(a_copy,a_host,sizeof(float) *x * 
n,hipMemcpyHostToDevice); hipMemcpy(b_copy,b_host,sizeof(float) *n * y,hipMemcpyHostToDevice); // MatrixPrint(a_host,Matrixsize,Matrixsize); // MatrixPrint(b_host,Matrixsize,Matrixsize); //run in gpu warp in C code MatrixMul_device(a_copy,b_copy,x,n,y,result_device); hipMemcpy(result_host, result_device,sizeof(float) *x * y,hipMemcpyDeviceToHost); hipFree(a_copy); hipFree(b_copy); hipFree(result_device); // MatrixPrint(result_host,Matrixsize,Matrixsize); // //run in cpu // float *result_cpu; // result_cpu = (float*) malloc(sizeof(float) * x * y); // clock_t start_host = clock(); // MatrixMul_host(a_host,x,n,b_host,n,y,result_cpu); // cout<<"cputime="<<(double)(clock() - start_host)/1000<<"ms"<<endl; // // MatrixPrint(result_cpu,Matrixsize,Matrixsize); // //compare value of gpu and cpu // float err=MatrixCompare(result_cpu,result_host,x,y); // cout<<"err in gpu and cpu = "<<err<<endl; return 0; }
262c45352bf3df994dd9bd972689a32a300167a2.cu
#include <iostream> #include <stdio.h> #include <iomanip> #include <cuda_runtime.h> using namespace std; void MatrixRandBin(float *mat, int rows, int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if ((float)rand()/RAND_MAX > 0.5) { mat[i*cols+j] = 1.0f; }else { mat[i*cols+j] = -1.0f; } } } } void MatrixPrint(float *mat, int rows, int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { cout << setw(2) << mat[i*cols+j] << " "; } cout << endl; } cout << endl; } void MatrixPrintD(int *mat, int rows, int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { cout << setw(2) << mat[i*cols+j] << " "; } cout << endl; } cout << endl; } float MatrixCompare(float *a, float *b, int rows, int cols) { float err = 0; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { err += abs(a[i*cols+j]-b[i*cols+j]); } } return err; } void MatrixMul_host(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c) { for (int i = 0; i < a_rows; i++) { for (int j = 0; j < b_cols; j++) { float t = 0; for (int k = 0; k < b_rows; k++) { t += a[i*a_cols+k]*b[k*b_cols+j]; } c[i*b_cols+j] = t; } } } //horizontal __global__ void AMatrix2Bin(float *a, int *a_bin, int pitch_a, int Pitch_a_bin, int a_rows, int MaxBlocks, int BINSIZE) { int tix = threadIdx.x; int bix = blockIdx.x; int bdx = blockDim.x; int gdx = gridDim.x; int maxThreads = MaxBlocks*a_rows; for (int id = bix*bdx+tix; id < maxThreads; id += gdx*bdx) { int rid = id/MaxBlocks; int cid = id%MaxBlocks; int Integer = 0; int base = 1; for (int i = 0; i < BINSIZE; i++) { if (a[rid*pitch_a+(cid+1)*BINSIZE-1-i] == 1.f) { Integer += base; } base = base<<1; } a_bin[rid*Pitch_a_bin+cid] = Integer; } } //vetical __global__ void BMatrix2Bin(float *b, int *b_bin, int pitch_b, int Pitch_b_bin, int b_cols, int MaxBlocks, int BINSIZE) { int tix = threadIdx.x; int bix = blockIdx.x; int bdx = blockDim.x; int gdx = gridDim.x; int maxThreads = MaxBlocks*b_cols; 
for (int id = bix*bdx+tix; id < maxThreads; id += gdx*bdx) { int cid = id/MaxBlocks; int rid = id%MaxBlocks; int Integer = 0; int base = 1; for (int i=0; i < BINSIZE; i++) { if (b[((rid+1)*BINSIZE-1-i)*pitch_b+cid] == 1.f) { Integer += base; } base = base<<1; } b_bin[rid*Pitch_b_bin+cid] = Integer; } } // __device__ unsigned char __popcount_tab_copy[256];//__constant__ is slower than __device__ // __device__ int popcount (int x) { // return __popcount_tab_copy[(x >> 0) & 0xff] // + __popcount_tab_copy[(x >> 8) & 0xff] // + __popcount_tab_copy[(x >> 16) & 0xff] // + __popcount_tab_copy[(x >> 24) & 0xff]; // } __global__ void MatrixMulXnor(int *a, int *b, float *result, unsigned char *__popcount_tab, int pitch_a, int pitch_b, int pitch_result, int midBlocks, int BINSIZE, int RealMidSize) { int tiy = threadIdx.x; int tix = threadIdx.y; int bix = blockIdx.x; int biy = blockIdx.y; int gdx = gridDim.x; int gdy = gridDim.y; int RectSize = blockDim.x; int rest = BINSIZE*RectSize*midBlocks-RealMidSize; __shared__ unsigned char __popcount_tab_shared[256]; __shared__ int a_rect_shared[8][8]; __shared__ int b_rect_shared[8][8]; for (int i = tix*RectSize+tiy; i < 256; i += RectSize*RectSize) { __popcount_tab_shared[i] = __popcount_tab[i]; } __syncthreads(); int sum = 0; for (int i = 0; i < midBlocks; i++) { a_rect_shared[tix][tiy] = a[(bix*RectSize+tix)*pitch_a+i*RectSize+tiy]; b_rect_shared[tix][tiy] = b[(i*RectSize+tix)*pitch_b+biy*RectSize+tiy]; __syncthreads(); int bin = 0; bin = a_rect_shared[tix][0]^b_rect_shared[0][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][1]^b_rect_shared[1][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = 
a_rect_shared[tix][2]^b_rect_shared[2][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][3]^b_rect_shared[3][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][4]^b_rect_shared[4][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][5]^b_rect_shared[5][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][6]^b_rect_shared[6][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); bin = a_rect_shared[tix][7]^b_rect_shared[7][tiy]; sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] + __popcount_tab_shared[(bin >> 8) & 0xff] + __popcount_tab_shared[(bin >> 16) & 0xff] + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][8]^b_rect_shared[8][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][9]^b_rect_shared[9][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = 
a_rect_shared[tix][10]^b_rect_shared[10][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][11]^b_rect_shared[11][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][12]^b_rect_shared[12][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][13]^b_rect_shared[13][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][14]^b_rect_shared[14][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][15]^b_rect_shared[15][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][16]^b_rect_shared[16][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][17]^b_rect_shared[17][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + 
__popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][18]^b_rect_shared[18][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][19]^b_rect_shared[19][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][20]^b_rect_shared[20][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][21]^b_rect_shared[21][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][22]^b_rect_shared[22][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][23]^b_rect_shared[23][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][24]^b_rect_shared[24][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][25]^b_rect_shared[25][tiy]; // sum += BINSIZE-2*( 
__popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][26]^b_rect_shared[26][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][27]^b_rect_shared[27][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][28]^b_rect_shared[28][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][29]^b_rect_shared[29][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][30]^b_rect_shared[30][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); // bin = a_rect_shared[tix][31]^b_rect_shared[31][tiy]; // sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff] // + __popcount_tab_shared[(bin >> 8) & 0xff] // + __popcount_tab_shared[(bin >> 16) & 0xff] // + __popcount_tab_shared[(bin >> 24) & 0xff]); __syncthreads(); } result[(bix*RectSize+tix)*pitch_result+biy*RectSize+tiy] = sum-rest; // num=0; // int rest=(BINSIZE*a_cols-RealMidSize); // for(int i=bix;i<a_rows;i+=gdx){ // for(int j=tix;j<b_cols;j+=bdx){ // // printf("i=%d ; j=%d\n",i,j); // 
int sum=0; // for(int k=0;k<a_cols;k++){ // int bin=(a_shared[num*a_cols+k]^b[k*pitch_b+j]); // int negnum=popcount(bin); // int posnum=BINSIZE-negnum; // //calculate ignores the rest of BINSIZE if the Matsize can't devided by BINSIZE ,it can cause err // //(10/00)'(01/00) should be 0000 but it is 0011,so 1+1 is trash in the result.and it mislead a_rows*b_cols times. // sum+=(posnum-negnum); // } // result[i*pitch_result+j]=sum-rest; // } // num++; // } } void MatrixMul_device(float *a, float *b, int a_rows, int a_cols, int b_cols, float *result) { int BINSIZE = 32;//size of bin2int, 32 means 0000 0000 0000 0000 0000 0000 0000 0000 int MaxBlocks = (a_cols-1)/BINSIZE+1; int Copysize = MaxBlocks*BINSIZE; float *a_copy;//a_rows * Copysize float *b_copy;//Copysize * b_cols size_t Pitch_a_copy, Pitch_b_copy; cudaMallocPitch((void**)&a_copy, &Pitch_a_copy, sizeof(float)*Copysize, a_rows); cudaMallocPitch((void**)&b_copy, &Pitch_b_copy, sizeof(float)*b_cols, Copysize); cudaMemset(a_copy, 0, Pitch_a_copy*a_rows); cudaMemset(b_copy, 0, Pitch_b_copy*Copysize); cudaMemcpy2D(a_copy, Pitch_a_copy, a, sizeof(float)*a_cols, sizeof(float)*a_cols, a_rows, cudaMemcpyDeviceToDevice); cudaMemcpy2D(b_copy, Pitch_b_copy, b, sizeof(float)*b_cols, sizeof(float)*b_cols, a_cols, cudaMemcpyDeviceToDevice); //check oringin // float *a_host; // float *b_host; // a_host = (float*) malloc(sizeof(float) * Copysize * a_rows); // b_host = (float*) malloc(sizeof(float) * b_cols * Copysize); // cudaMemcpy2D(a_host,sizeof(float) *Copysize, a_copy,Pitch_a_copy,sizeof(float) *Copysize , a_rows,cudaMemcpyDeviceToHost); // cudaMemcpy2D(b_host,sizeof(float) *b_cols, b_copy,Pitch_b_copy,sizeof(float) *b_cols , Copysize,cudaMemcpyDeviceToHost); // MatrixPrint(a_host,a_rows,Copysize); // MatrixPrint(b_host,Copysize,b_cols); int RectBlockSize = 4; dim3 RectBlockNum_a_bin((a_rows-1)/RectBlockSize+1, (MaxBlocks-1)/RectBlockSize+1, 1);//with block multiply dim3 RectBlockNum_b_bin((MaxBlocks-1)/RectBlockSize+1, 
(b_cols-1)/RectBlockSize+1, 1); int *a_bin; int *b_bin; size_t Pitch_a_bin, Pitch_b_bin; cudaMallocPitch((void**)&a_bin , &Pitch_a_bin , sizeof(int)*RectBlockSize*RectBlockNum_a_bin.y, RectBlockSize*RectBlockNum_a_bin.x); cudaMallocPitch((void**)&b_bin , &Pitch_b_bin , sizeof(int)*RectBlockSize*RectBlockNum_b_bin.y, RectBlockSize*RectBlockNum_b_bin.x); cudaMemset(a_bin, 0, Pitch_a_bin*RectBlockSize*RectBlockNum_a_bin.x); cudaMemset(b_bin, 0, Pitch_b_bin*RectBlockSize*RectBlockNum_b_bin.x); dim3 BS_BIN(512,1,1); dim3 GS_BIN(6,1,1); AMatrix2Bin<<< GS_BIN, BS_BIN >>>(a_copy, a_bin, Pitch_a_copy/sizeof(float), Pitch_a_bin/sizeof(int), a_rows, MaxBlocks, BINSIZE); BMatrix2Bin<<< GS_BIN, BS_BIN >>>(b_copy, b_bin, Pitch_b_copy/sizeof(float), Pitch_b_bin/sizeof(int), b_cols, MaxBlocks, BINSIZE); cudaFree(a_copy); cudaFree(b_copy); //check bin // int *a_host_bin; // int *b_host_bin; // a_host_bin = (int*) malloc(sizeof(int) *MaxBlocks * a_rows); // b_host_bin = (int*) malloc(sizeof(int) *b_cols * MaxBlocks); // cudaMemcpy2D(a_host_bin,sizeof(int) *MaxBlocks, a_bin,Pitch_a_bin,sizeof(int) *MaxBlocks , a_rows ,cudaMemcpyDeviceToHost); // cudaMemcpy2D(b_host_bin,sizeof(int) *b_cols, b_bin,Pitch_b_bin,sizeof(int) *b_cols , MaxBlocks ,cudaMemcpyDeviceToHost); // MatrixPrintD(a_host_bin,a_rows,MaxBlocks); // MatrixPrintD(b_host_bin,MaxBlocks,b_cols); float *result_bin;//a_rows * b_cols size_t Pitch_result_bin; cudaMallocPitch((void**)&result_bin , &Pitch_result_bin , sizeof(float)*RectBlockSize*RectBlockNum_b_bin.y, RectBlockSize*RectBlockNum_a_bin.x); const unsigned char __popcount_tab[] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5, 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6, 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6, 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7, 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6, 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7, 
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7, 3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8, }; unsigned char *__popcount_tab_copy; cudaMalloc((void**)&__popcount_tab_copy, sizeof(__popcount_tab)); cudaMemcpy(__popcount_tab_copy, __popcount_tab, sizeof(__popcount_tab), cudaMemcpyHostToDevice); cudaEvent_t start_device, stop_device; float time_device; cudaEventCreate(&start_device); cudaEventCreate(&stop_device); cudaEventRecord(start_device, 0); dim3 BS_MM(RectBlockSize, RectBlockSize, 1); dim3 GS_MM(RectBlockNum_a_bin.x, RectBlockNum_b_bin.y, 1); MatrixMulXnor<<< GS_MM, BS_MM >>>(a_bin, b_bin, result_bin, __popcount_tab_copy, Pitch_a_bin/sizeof(int), Pitch_b_bin/sizeof(int), Pitch_result_bin/sizeof(float), RectBlockNum_a_bin.y, BINSIZE, a_cols); cudaEventRecord( stop_device, 0 ); cudaEventSynchronize( stop_device ); cudaEventElapsedTime( &time_device, start_device, stop_device ); cudaEventDestroy( start_device ); cudaEventDestroy( stop_device ); cout<<"gputime="<<time_device<<"ms"<<endl; cudaMemcpy2D(result,sizeof(float) *b_cols, result_bin,Pitch_result_bin,sizeof(float) *b_cols , a_rows ,cudaMemcpyDeviceToDevice); cudaFree(a_bin); cudaFree(b_bin); cudaFree(result_bin); } int main(){ //simulate pytorch param int x=1000; int n=1000; int y=1000; float *a_host; float *b_host; float *result_host; a_host = (float*) malloc(sizeof(float) * x * n); b_host = (float*) malloc(sizeof(float) * n * y); result_host = (float*) malloc(sizeof(float) * x * y); srand(0); MatrixRandBin(a_host,x,n); MatrixRandBin(b_host,n,y); // cout<<MatrixCopysize<<endl; float *a_copy; float *b_copy; float *result_device; cudaMalloc((void**)&a_copy,sizeof(float) *x * n); cudaMalloc((void**)&b_copy,sizeof(float) *n * y); cudaMalloc((void**)&result_device,sizeof(float) *x * y); cudaMemcpy(a_copy,a_host,sizeof(float) *x * n,cudaMemcpyHostToDevice); cudaMemcpy(b_copy,b_host,sizeof(float) *n * y,cudaMemcpyHostToDevice); // MatrixPrint(a_host,Matrixsize,Matrixsize); // 
MatrixPrint(b_host,Matrixsize,Matrixsize); //run in gpu warp in C code MatrixMul_device(a_copy,b_copy,x,n,y,result_device); cudaMemcpy(result_host, result_device,sizeof(float) *x * y,cudaMemcpyDeviceToHost); cudaFree(a_copy); cudaFree(b_copy); cudaFree(result_device); // MatrixPrint(result_host,Matrixsize,Matrixsize); // //run in cpu // float *result_cpu; // result_cpu = (float*) malloc(sizeof(float) * x * y); // clock_t start_host = clock(); // MatrixMul_host(a_host,x,n,b_host,n,y,result_cpu); // cout<<"cputime="<<(double)(clock() - start_host)/1000<<"ms"<<endl; // // MatrixPrint(result_cpu,Matrixsize,Matrixsize); // //compare value of gpu and cpu // float err=MatrixCompare(result_cpu,result_host,x,y); // cout<<"err in gpu and cpu = "<<err<<endl; return 0; }
9fb2410f0cdd7d0835c8102b9ce1580726c38acf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "json_reader_impl.hpp" #include <algorithm> #include <iostream> #include <map> #include <memory> #include <numeric> #include <tuple> #include <utility> #include <vector> #include <utilities/legacy/cudf_utils.h> #include <cudf/utilities/error.hpp> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <io/comp/io_uncomp.h> #include <io/legacy/cuio_common.hpp> #include <io/utilities/legacy/parsing_utils.cuh> namespace cudf { namespace io { namespace json { using string_pair = std::pair<const char *, size_t>; reader::Impl::Impl(std::unique_ptr<datasource> source, std::string filepath, reader_options const &options) : source_(std::move(source)), filepath_(filepath), args_(options) { CUDF_EXPECTS(args_.lines, "Only JSON Lines format is currently supported.\n"); d_true_trie_ = createSerializedTrie({"true"}); opts_.trueValuesTrie = d_true_trie_.data().get(); d_false_trie_ = createSerializedTrie({"false"}); opts_.falseValuesTrie = d_false_trie_.data().get(); d_na_trie_ = createSerializedTrie({"null"}); opts_.naValuesTrie = d_na_trie_.data().get(); } /**---------------------------------------------------------------------------* * @brief Estimates the maximum expected length or a row, based on the number * of columns * * If the number of columns is not available, it will return a value large * 
enough for most use cases * * @param[in] num_columns Number of columns in the JSON file (optional) * * @return Estimated maximum size of a row, in bytes *---------------------------------------------------------------------------**/ constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept { constexpr size_t max_row_bytes = 16 * 1024; // 16KB constexpr size_t column_bytes = 64; constexpr size_t base_padding = 1024; // 1KB if (num_columns == 0) { // Use flat size if the number of columns is not known return max_row_bytes; } else { // Expand the size based on the number of columns, if available return base_padding + num_columns * column_bytes; } } table reader::Impl::read(size_t range_offset, size_t range_size) { ingestRawInput(range_offset, range_size); CUDF_EXPECTS(buffer_ != nullptr, "Ingest failed: input data is null.\n"); decompressInput(); CUDF_EXPECTS(uncomp_data_ != nullptr, "Ingest failed: uncompressed input data is null.\n"); CUDF_EXPECTS(uncomp_size_ != 0, "Ingest failed: uncompressed input data has zero size.\n"); setRecordStarts(); CUDF_EXPECTS(!rec_starts_.empty(), "Error enumerating records.\n"); uploadDataToDevice(); CUDF_EXPECTS(data_.size() != 0, "Error uploading input data to the GPU.\n"); setColumnNames(); CUDF_EXPECTS(!column_names_.empty(), "Error determining column names.\n"); setDataTypes(); CUDF_EXPECTS(!dtypes_.empty(), "Error in data type detection.\n"); convertDataToColumns(); CUDF_EXPECTS(!columns_.empty(), "Error converting json input into gdf columns.\n"); // Transfer ownership to raw pointer output std::vector<gdf_column *> out_cols(columns_.size()); for (size_t i = 0; i < columns_.size(); ++i) { out_cols[i] = columns_[i].release(); } return table(out_cols.data(), out_cols.size()); } void reader::Impl::ingestRawInput(size_t range_offset, size_t range_size) { size_t map_range_size = 0; if (range_size != 0) { map_range_size = range_size + calculateMaxRowSize(args_.dtype.size()); } // Support delayed opening of the file if using 
memory mapping datasource // This allows only mapping of a subset of the file if using byte range if (source_ == nullptr) { assert(!filepath_.empty()); source_ = datasource::create(filepath_, range_offset, map_range_size); } if (!source_->empty()) { auto data_size = (map_range_size != 0) ? map_range_size : source_->size(); buffer_ = source_->get_buffer(range_offset, data_size); } byte_range_offset_ = range_offset; byte_range_size_ = range_size; } void reader::Impl::decompressInput() { const auto compression_type = infer_compression_type( args_.compression, filepath_, {{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}}); if (compression_type == "none") { // Do not use the owner vector here to avoid extra copy uncomp_data_ = reinterpret_cast<const char *>(buffer_->data()); uncomp_size_ = buffer_->size(); } else { CUDF_EXPECTS(getUncompressedHostData( reinterpret_cast<const char *>(buffer_->data()), buffer_->size(), compression_type, uncomp_data_owner_) == GDF_SUCCESS, "Input data decompression failed.\n"); uncomp_data_ = uncomp_data_owner_.data(); uncomp_size_ = uncomp_data_owner_.size(); } } void reader::Impl::setRecordStarts() { std::vector<char> chars_to_count{'\n'}; // Currently, ignoring lineterminations within quotes is handled by recording the records of both, // and then filtering out the records that is a quotechar or a linetermination within a quotechar pair. if (allow_newlines_in_strings_) { chars_to_count.push_back('\"'); } // If not starting at an offset, add an extra row to account for the first row in the file const auto prefilter_count = countAllFromSet(uncomp_data_, uncomp_size_, chars_to_count) + ((byte_range_offset_ == 0) ? 
1 : 0); rec_starts_.resize(prefilter_count); auto *find_result_ptr = rec_starts_.data().get(); // Manually adding an extra row to account for the first row in the file if (byte_range_offset_ == 0) { find_result_ptr++; CUDA_TRY(hipMemsetAsync(rec_starts_.data().get(), 0ull, sizeof(uint64_t))); } std::vector<char> chars_to_find{'\n'}; if (allow_newlines_in_strings_) { chars_to_find.push_back('\"'); } // Passing offset = 1 to return positions AFTER the found character findAllFromSet(uncomp_data_, uncomp_size_, chars_to_find, 1, find_result_ptr); // Previous call stores the record pinput_file.typeositions as encountered by all threads // Sort the record positions as subsequent processing may require filtering // certain rows or other processing on specific records thrust::sort(rmm::exec_policy()->on(0), rec_starts_.begin(), rec_starts_.end()); auto filtered_count = prefilter_count; if (allow_newlines_in_strings_) { thrust::host_vector<uint64_t> h_rec_starts = rec_starts_; bool quotation = false; for (cudf::size_type i = 1; i < prefilter_count; ++i) { if (uncomp_data_[h_rec_starts[i] - 1] == '\"') { quotation = !quotation; h_rec_starts[i] = uncomp_size_; filtered_count--; } else if (quotation) { h_rec_starts[i] = uncomp_size_; filtered_count--; } } rec_starts_ = h_rec_starts; thrust::sort(rmm::exec_policy()->on(0), rec_starts_.begin(), rec_starts_.end()); } // Exclude the ending newline as it does not precede a record start if (uncomp_data_[uncomp_size_ - 1] == '\n') { filtered_count--; } rec_starts_.resize(filtered_count); } void reader::Impl::uploadDataToDevice() { size_t start_offset = 0; size_t end_offset = uncomp_size_; // Trim lines that are outside range if (byte_range_size_ != 0 || byte_range_offset_ != 0) { thrust::host_vector<uint64_t> h_rec_starts = rec_starts_; if (byte_range_size_ != 0) { auto it = h_rec_starts.end() - 1; while (it >= h_rec_starts.begin() && *it > byte_range_size_) { end_offset = *it; --it; } h_rec_starts.erase(it + 1, h_rec_starts.end()); 
} // Resize to exclude rows outside of the range // Adjust row start positions to account for the data subcopy start_offset = h_rec_starts.front(); rec_starts_.resize(h_rec_starts.size()); thrust::transform(rmm::exec_policy()->on(0), rec_starts_.begin(), rec_starts_.end(), thrust::make_constant_iterator(start_offset), rec_starts_.begin(), thrust::minus<uint64_t>()); } const size_t bytes_to_upload = end_offset - start_offset; CUDF_EXPECTS(bytes_to_upload <= uncomp_size_, "Error finding the record within the specified byte range.\n"); // Upload the raw data that is within the rows of interest data_ = rmm::device_buffer(uncomp_data_ + start_offset, bytes_to_upload); } /**---------------------------------------------------------------------------* * @brief Extract value names from a JSON object * * @param[in] json_obj Host vector containing the JSON object * @param[in] opts Parsing options (e.g. delimiter and quotation character) * * @return std::vector<std::string> names of JSON object values *---------------------------------------------------------------------------**/ std::vector<std::string> getNamesFromJsonObject(const std::vector<char> &json_obj, const ParseOptions &opts) { enum class ParseState { preColName, colName, postColName }; std::vector<std::string> names; bool quotation = false; auto state = ParseState::preColName; int name_start = 0; for (size_t pos = 0; pos < json_obj.size(); ++pos) { if (state == ParseState::preColName) { if (json_obj[pos] == opts.quotechar) { name_start = pos + 1; state = ParseState::colName; continue; } } else if (state == ParseState::colName) { if (json_obj[pos] == opts.quotechar && json_obj[pos - 1] != '\\') { // if found a non-escaped quote character, it's the end of the column name names.emplace_back(&json_obj[name_start], &json_obj[pos]); state = ParseState::postColName; continue; } } else if (state == ParseState::postColName) { // TODO handle complex data types that might include unquoted commas if (!quotation && 
json_obj[pos] == opts.delimiter) { state = ParseState::preColName; continue; } else if (json_obj[pos] == opts.quotechar) { quotation = !quotation; } } } return names; } void reader::Impl::setColumnNames() { // If file only contains one row, use the file size for the row size uint64_t first_row_len = data_.size() / sizeof(char); if (rec_starts_.size() > 1) { // Set first_row_len to the offset of the second row, if it exists CUDA_TRY(hipMemcpyAsync(&first_row_len, rec_starts_.data().get() + 1, sizeof(uint64_t), hipMemcpyDeviceToHost)); } std::vector<char> first_row(first_row_len); CUDA_TRY(hipMemcpyAsync(first_row.data(), data_.data(), first_row_len * sizeof(char), hipMemcpyDeviceToHost)); CUDA_TRY(hipStreamSynchronize(0)); // Determine the row format between: // JSON array - [val1, val2, ...] and // JSON object - {"col1":val1, "col2":val2, ...} // based on the top level opening bracket const auto first_square_bracket = std::find(first_row.begin(), first_row.end(), '['); const auto first_curly_bracket = std::find(first_row.begin(), first_row.end(), '{'); CUDF_EXPECTS(first_curly_bracket != first_row.end() || first_square_bracket != first_row.end(), "Input data is not a valid JSON file."); // If the first opening bracket is '{', assume object format const bool is_object = first_curly_bracket < first_square_bracket; if (is_object) { column_names_ = getNamesFromJsonObject(first_row, opts_); } else { int cols_found = 0; bool quotation = false; for (size_t pos = 0; pos < first_row.size(); ++pos) { // Flip the quotation flag if current character is a quotechar if (first_row[pos] == opts_.quotechar) { quotation = !quotation; } // Check if end of a column/row else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts_.delimiter)) { column_names_.emplace_back(std::to_string(cols_found++)); } } } } void reader::Impl::convertDataToColumns() { const auto num_columns = dtypes_.size(); for (size_t col = 0; col < num_columns; ++col) { 
columns_.emplace_back(rec_starts_.size(), dtypes_[col], gdf_dtype_extra_info{TIME_UNIT_NONE}, column_names_[col]); columns_.back().allocate(); } thrust::host_vector<gdf_dtype> h_dtypes(num_columns); thrust::host_vector<void *> h_data(num_columns); thrust::host_vector<cudf::valid_type *> h_valid(num_columns); for (size_t i = 0; i < num_columns; ++i) { h_dtypes[i] = columns_[i]->dtype; h_data[i] = columns_[i]->data; h_valid[i] = columns_[i]->valid; } rmm::device_vector<gdf_dtype> d_dtypes = h_dtypes; rmm::device_vector<void *> d_data = h_data; rmm::device_vector<cudf::valid_type *> d_valid = h_valid; rmm::device_vector<cudf::size_type> d_valid_counts(num_columns, 0); convertJsonToColumns(d_dtypes.data().get(), d_data.data().get(), d_valid.data().get(), d_valid_counts.data().get()); CUDA_TRY(hipDeviceSynchronize()); CUDA_TRY(hipGetLastError()); thrust::host_vector<cudf::size_type> h_valid_counts = d_valid_counts; for (size_t i = 0; i < num_columns; ++i) { columns_[i]->null_count = columns_[i]->size - h_valid_counts[i]; } // Perform any final column preparation (may reference decoded data) for (auto &column : columns_) { column.finalize(); } } /**---------------------------------------------------------------------------* * @brief Functor for converting plain text data to cuDF data type value. *---------------------------------------------------------------------------**/ struct ConvertFunctor { /**---------------------------------------------------------------------------* * @brief Template specialization for operator() for types whose values can be * convertible to a 0 or 1 to represent false/true. The converting is done by * checking against the default and user-specified true/false values list. * * It is handled here rather than within convertStrToValue() as that function * is used by other types (ex. timestamp) that aren't 'booleable'. 
*---------------------------------------------------------------------------**/ template <typename T, typename std::enable_if_t<std::is_integral<T>::value> * = nullptr> __host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start, long end, const ParseOptions &opts) { T &value{static_cast<T *>(gdf_columns)[row]}; // Check for user-specified true/false values first, where the output is // replaced with 1/0 respectively const size_t field_len = end - start + 1; if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len)) { value = 1; } else if (serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) { value = 0; } else { value = convertStrToValue<T>(data, start, end, opts); } } /**---------------------------------------------------------------------------* * @brief Default template operator() dispatch specialization all data types * (including wrapper types) that is not covered by above. *---------------------------------------------------------------------------**/ template <typename T, typename std::enable_if_t<!std::is_integral<T>::value> * = nullptr> __host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start, long end, const ParseOptions &opts) { T &value{static_cast<T *>(gdf_columns)[row]}; value = convertStrToValue<T>(data, start, end, opts); } }; /**---------------------------------------------------------------------------* * @brief CUDA Kernel that modifies the start and stop offsets to exclude * the sections outside of the top level brackets. * * The top level brackets characters are excluded from the resulting range. 
* Parameter stop has the same semantics as end() in STL containers * (one past the last element) * * @param[in] data Pointer to the device buffer containing the data to process * @param[in,out] start Offset of the first character in the range * @param[in,out] stop Offset of the first character after the range * * @return void *---------------------------------------------------------------------------**/ __device__ void limitRangeToBrackets(const char *data, long &start, long &stop) { while (start < stop && data[start] != '[' && data[start] != '{') { start++; } start++; while (start < stop && data[stop - 1] != ']' && data[stop - 1] != '}') { stop--; } stop--; } /**---------------------------------------------------------------------------* * @brief CUDA kernel that finds the end position of the next field name, * including the colon that separates the name from the field value. * * Returns the position after the colon that preceeds the value token. * * @param[in] data Pointer to the device buffer containing the data to process * @param[in] opts Parsing options (e.g. delimiter and quotation character) * @param[in] start Offset of the first character in the range * @param[in] stop Offset of the first character after the range * * @return long Position of the first character after the field name. *---------------------------------------------------------------------------**/ __device__ long seekFieldNameEnd(const char *data, const ParseOptions opts, long start, long stop) { bool quotation = false; for (auto pos = start; pos < stop; ++pos) { // Ignore escaped quotes if (data[pos] == opts.quotechar && data[pos - 1] != '\\') { quotation = !quotation; } else if (!quotation && data[pos] == ':') { return pos + 1; } } return stop; } /**---------------------------------------------------------------------------* * @brief CUDA kernel that parses and converts plain text data into cuDF column data. 
* * Data is processed one record at a time * * @param[in] data The entire data to read * @param[in] data_size Size of the data buffer, in bytes * @param[in] rec_starts The start of each data record * @param[in] num_records The number of lines/rows * @param[in] dtypes The data type of each column * @param[in] opts A set of parsing options * @param[out] gdf_columns The output column data * @param[in] num_columns The number of columns * @param[out] valid_fields The bitmaps indicating whether column fields are valid * @param[out] num_valid_fields The numbers of valid fields in columns * * @return void *---------------------------------------------------------------------------**/ __global__ void convertJsonToGdf(const char *data, size_t data_size, const uint64_t *rec_starts, cudf::size_type num_records, const gdf_dtype *dtypes, ParseOptions opts, void *const *gdf_columns, int num_columns, cudf::valid_type *const *valid_fields, cudf::size_type *num_valid_fields) { const long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); if (rec_id >= num_records) return; long start = rec_starts[rec_id]; // has the same semantics as end() in STL containers (one past last element) long stop = ((rec_id < num_records - 1) ? 
rec_starts[rec_id + 1] : data_size); limitRangeToBrackets(data, start, stop); const bool is_object = (data[start - 1] == '{'); for (int col = 0; col < num_columns && start < stop; col++) { if (is_object) { start = seekFieldNameEnd(data, opts, start, stop); } // field_end is at the next delimiter/newline const long field_end = seekFieldEnd(data, opts, start, stop); long field_data_last = field_end - 1; // Modify start & end to ignore whitespace and quotechars adjustForWhitespaceAndQuotes(data, &start, &field_data_last, opts.quotechar); // Empty fields are not legal values if (start <= field_data_last && !serializedTrieContains(opts.naValuesTrie, data + start, field_end - start)) { // Type dispatcher does not handle GDF_STRINGS if (dtypes[col] == gdf_dtype::GDF_STRING) { auto str_list = static_cast<string_pair *>(gdf_columns[col]); str_list[rec_id].first = data + start; str_list[rec_id].second = field_data_last - start + 1; } else { cudf::type_dispatcher(dtypes[col], ConvertFunctor{}, data, gdf_columns[col], rec_id, start, field_data_last, opts); } // set the valid bitmap - all bits were set to 0 to start setBitmapBit(valid_fields[col], rec_id); atomicAdd(&num_valid_fields[col], 1); } else if (dtypes[col] == gdf_dtype::GDF_STRING) { auto str_list = static_cast<string_pair *>(gdf_columns[col]); str_list[rec_id].first = nullptr; str_list[rec_id].second = 0; } start = field_end + 1; } } void reader::Impl::convertJsonToColumns(gdf_dtype *const dtypes, void *const *gdf_columns, cudf::valid_type *const *valid_fields, cudf::size_type *num_valid_fields) { int block_size; int min_grid_size; CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, convertJsonToGdf)); const int grid_size = (rec_starts_.size() + block_size - 1) / block_size; hipLaunchKernelGGL(( convertJsonToGdf) , dim3(grid_size), dim3(block_size) , 0, 0, static_cast<char *>(data_.data()), data_.size(), rec_starts_.data().get(), rec_starts_.size(), dtypes, opts_, gdf_columns, columns_.size(), 
valid_fields, num_valid_fields); CUDA_TRY(hipGetLastError()); } /**---------------------------------------------------------------------------* * @brief CUDA kernel that parses and converts data into cuDF column data. * * Data is processed in one row/record at a time, so the number of total * threads (tid) is equal to the number of rows. * * @param[in] data The entire plain text data to read * @param[in] data_size Size of the data buffer, in bytes * @param[in] opts A set of parsing options * @param[in] num_columns The number of columns of input data * @param[in] rec_starts The start the input data of interest * @param[in] num_records The number of lines/rows of input data * @param[out] column_infos The count for each column data type * * @returns void *---------------------------------------------------------------------------**/ __global__ void detectJsonDataTypes(const char *data, size_t data_size, const ParseOptions opts, int num_columns, const uint64_t *rec_starts, cudf::size_type num_records, ColumnInfo *column_infos) { long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); if (rec_id >= num_records) return; long start = rec_starts[rec_id]; // has the same semantics as end() in STL containers (one past last element) long stop = ((rec_id < num_records - 1) ? 
rec_starts[rec_id + 1] : data_size); limitRangeToBrackets(data, start, stop); const bool is_object = (data[start - 1] == '{'); for (int col = 0; col < num_columns; col++) { if (is_object) { start = seekFieldNameEnd(data, opts, start, stop); } const long field_end = seekFieldEnd(data, opts, start, stop); long field_data_last = field_end - 1; adjustForWhitespaceAndQuotes(data, &start, &field_data_last); const int field_len = field_data_last - start + 1; // Checking if the field is empty if (start > field_data_last || serializedTrieContains(opts.naValuesTrie, data + start, field_len)) { atomicAdd(&column_infos[col].null_count, 1); start = field_end + 1; continue; } int digit_count = 0; int decimal_count = 0; int slash_count = 0; int dash_count = 0; int colon_count = 0; int exponent_count = 0; int other_count = 0; const bool maybe_hex = ((field_len > 2 && data[start] == '0' && data[start + 1] == 'x') || (field_len > 3 && data[start] == '-' && data[start + 1] == '0' && data[start + 2] == 'x')); for (long pos = start; pos <= field_data_last; pos++) { if (isDigit(data[pos], maybe_hex)) { digit_count++; continue; } // Looking for unique characters that will help identify column types switch (data[pos]) { case '.': decimal_count++; break; case '-': dash_count++; break; case '/': slash_count++; break; case ':': colon_count++; break; case 'e': case 'E': if (!maybe_hex && pos > start && pos < field_data_last) exponent_count++; break; default: other_count++; break; } } // Integers have to have the length of the string int int_req_number_cnt = field_len; // Off by one if they start with a minus sign if (data[start] == '-' && field_len > 1) { --int_req_number_cnt; } // Off by one if they are a hexadecimal number if (maybe_hex) { --int_req_number_cnt; } if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len) || serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) { atomicAdd(&column_infos[col].bool_count, 1); } else if (digit_count == 
int_req_number_cnt) { atomicAdd(&column_infos[col].int_count, 1); } else if (isLikeFloat(field_len, digit_count, decimal_count, dash_count, exponent_count)) { atomicAdd(&column_infos[col].float_count, 1); } // A date-time field cannot have more than 3 non-special characters // A number field cannot have more than one decimal point else if (other_count > 3 || decimal_count > 1) { atomicAdd(&column_infos[col].string_count, 1); } else { // A date field can have either one or two '-' or '\'; A legal combination will only have one of them // To simplify the process of auto column detection, we are not covering all the date-time formation permutations if ((dash_count > 0 && dash_count <= 2 && slash_count == 0) || (dash_count == 0 && slash_count > 0 && slash_count <= 2)) { if (colon_count <= 2) { atomicAdd(&column_infos[col].datetime_count, 1); } else { atomicAdd(&column_infos[col].string_count, 1); } } else { // Default field type is string atomicAdd(&column_infos[col].string_count, 1); } } start = field_end + 1; } } void reader::Impl::detectDataTypes(ColumnInfo *column_infos) { int block_size; int min_grid_size; CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, detectJsonDataTypes)); // Calculate actual block count to use based on records count const int grid_size = (rec_starts_.size() + block_size - 1) / block_size; hipLaunchKernelGGL(( detectJsonDataTypes) , dim3(grid_size), dim3(block_size) , 0, 0, static_cast<char *>(data_.data()), data_.size(), opts_, column_names_.size(), rec_starts_.data().get(), rec_starts_.size(), column_infos); CUDA_TRY(hipGetLastError()); } void reader::Impl::setDataTypes() { if (!args_.dtype.empty()) { CUDF_EXPECTS(args_.dtype.size() == column_names_.size(), "Need to specify the type of each column.\n"); // Assume that the dtype is in dictionary format only if all elements contain a colon const bool is_dict = std::all_of(args_.dtype.begin(), args_.dtype.end(), [](const std::string &s) { return std::find(s.begin(), 
s.end(), ':') != s.end(); }); if (is_dict) { std::map<std::string, gdf_dtype> col_type_map; std::map<std::string, gdf_dtype_extra_info> col_type_info_map; for (const auto &ts : args_.dtype) { const size_t colon_idx = ts.find(":"); const std::string col_name(ts.begin(), ts.begin() + colon_idx); const std::string type_str(ts.begin() + colon_idx + 1, ts.end()); std::tie( col_type_map[col_name], col_type_info_map[col_name] ) = convertStringToDtype(type_str); } // Using the map here allows O(n log n) complexity for (size_t col = 0; col < args_.dtype.size(); ++col) { dtypes_.push_back(col_type_map[column_names_[col]]); dtypes_extra_info_.push_back(col_type_info_map[column_names_[col]]); } } else { auto dtype_ = std::back_inserter(dtypes_); auto dtype_info_ = std::back_inserter(dtypes_extra_info_); for (size_t col = 0; col < args_.dtype.size(); ++col) { std::tie(dtype_, dtype_info_) = convertStringToDtype(args_.dtype[col]); } } } else { CUDF_EXPECTS(rec_starts_.size() != 0, "No data available for data type inference.\n"); const auto num_columns = column_names_.size(); dtypes_extra_info_ = std::vector<gdf_dtype_extra_info>(num_columns, gdf_dtype_extra_info{ TIME_UNIT_NONE }); rmm::device_vector<ColumnInfo> d_column_infos(num_columns, ColumnInfo{}); detectDataTypes(d_column_infos.data().get()); thrust::host_vector<ColumnInfo> h_column_infos = d_column_infos; for (const auto &cinfo : h_column_infos) { if (cinfo.null_count == static_cast<int>(rec_starts_.size())) { // Entire column is NULL; allocate the smallest amount of memory dtypes_.push_back(GDF_INT8); } else if (cinfo.string_count > 0) { dtypes_.push_back(GDF_STRING); } else if (cinfo.datetime_count > 0) { dtypes_.push_back(GDF_DATE64); } else if (cinfo.float_count > 0 || (cinfo.int_count > 0 && cinfo.null_count > 0)) { dtypes_.push_back(GDF_FLOAT64); } else if (cinfo.int_count > 0) { dtypes_.push_back(GDF_INT64); } else if (cinfo.bool_count > 0) { dtypes_.push_back(GDF_BOOL8); } else { CUDF_FAIL("Data type detection 
failed.\n"); } } } } reader::reader(std::string filepath, reader_options const &options) : impl_(std::make_unique<Impl>(nullptr, filepath, options)) { // Delay actual instantiation of data source until read to allow for // partial memory mapping of file using byte ranges } reader::reader(const char *buffer, size_t length, reader_options const &options) : impl_(std::make_unique<Impl>(datasource::create(buffer, length), "", options)) {} reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file, reader_options const &options) : impl_(std::make_unique<Impl>(datasource::create(file), "", options)) {} table reader::read() { return impl_->read(0, 0); } table reader::read_byte_range(size_t offset, size_t size) { return impl_->read(offset, size); } reader::~reader() = default; } // namespace json } // namespace io } // namespace cudf
9fb2410f0cdd7d0835c8102b9ce1580726c38acf.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "json_reader_impl.hpp" #include <algorithm> #include <iostream> #include <map> #include <memory> #include <numeric> #include <tuple> #include <utility> #include <vector> #include <utilities/legacy/cudf_utils.h> #include <cudf/utilities/error.hpp> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <io/comp/io_uncomp.h> #include <io/legacy/cuio_common.hpp> #include <io/utilities/legacy/parsing_utils.cuh> namespace cudf { namespace io { namespace json { using string_pair = std::pair<const char *, size_t>; reader::Impl::Impl(std::unique_ptr<datasource> source, std::string filepath, reader_options const &options) : source_(std::move(source)), filepath_(filepath), args_(options) { CUDF_EXPECTS(args_.lines, "Only JSON Lines format is currently supported.\n"); d_true_trie_ = createSerializedTrie({"true"}); opts_.trueValuesTrie = d_true_trie_.data().get(); d_false_trie_ = createSerializedTrie({"false"}); opts_.falseValuesTrie = d_false_trie_.data().get(); d_na_trie_ = createSerializedTrie({"null"}); opts_.naValuesTrie = d_na_trie_.data().get(); } /**---------------------------------------------------------------------------* * @brief Estimates the maximum expected length or a row, based on the number * of columns * * If the number of columns is not available, it will return a value large * enough for most use cases * * @param[in] num_columns Number of columns in the JSON file 
(optional) * * @return Estimated maximum size of a row, in bytes *---------------------------------------------------------------------------**/ constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept { constexpr size_t max_row_bytes = 16 * 1024; // 16KB constexpr size_t column_bytes = 64; constexpr size_t base_padding = 1024; // 1KB if (num_columns == 0) { // Use flat size if the number of columns is not known return max_row_bytes; } else { // Expand the size based on the number of columns, if available return base_padding + num_columns * column_bytes; } } table reader::Impl::read(size_t range_offset, size_t range_size) { ingestRawInput(range_offset, range_size); CUDF_EXPECTS(buffer_ != nullptr, "Ingest failed: input data is null.\n"); decompressInput(); CUDF_EXPECTS(uncomp_data_ != nullptr, "Ingest failed: uncompressed input data is null.\n"); CUDF_EXPECTS(uncomp_size_ != 0, "Ingest failed: uncompressed input data has zero size.\n"); setRecordStarts(); CUDF_EXPECTS(!rec_starts_.empty(), "Error enumerating records.\n"); uploadDataToDevice(); CUDF_EXPECTS(data_.size() != 0, "Error uploading input data to the GPU.\n"); setColumnNames(); CUDF_EXPECTS(!column_names_.empty(), "Error determining column names.\n"); setDataTypes(); CUDF_EXPECTS(!dtypes_.empty(), "Error in data type detection.\n"); convertDataToColumns(); CUDF_EXPECTS(!columns_.empty(), "Error converting json input into gdf columns.\n"); // Transfer ownership to raw pointer output std::vector<gdf_column *> out_cols(columns_.size()); for (size_t i = 0; i < columns_.size(); ++i) { out_cols[i] = columns_[i].release(); } return table(out_cols.data(), out_cols.size()); } void reader::Impl::ingestRawInput(size_t range_offset, size_t range_size) { size_t map_range_size = 0; if (range_size != 0) { map_range_size = range_size + calculateMaxRowSize(args_.dtype.size()); } // Support delayed opening of the file if using memory mapping datasource // This allows only mapping of a subset of the file if using 
byte range if (source_ == nullptr) { assert(!filepath_.empty()); source_ = datasource::create(filepath_, range_offset, map_range_size); } if (!source_->empty()) { auto data_size = (map_range_size != 0) ? map_range_size : source_->size(); buffer_ = source_->get_buffer(range_offset, data_size); } byte_range_offset_ = range_offset; byte_range_size_ = range_size; } void reader::Impl::decompressInput() { const auto compression_type = infer_compression_type( args_.compression, filepath_, {{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}}); if (compression_type == "none") { // Do not use the owner vector here to avoid extra copy uncomp_data_ = reinterpret_cast<const char *>(buffer_->data()); uncomp_size_ = buffer_->size(); } else { CUDF_EXPECTS(getUncompressedHostData( reinterpret_cast<const char *>(buffer_->data()), buffer_->size(), compression_type, uncomp_data_owner_) == GDF_SUCCESS, "Input data decompression failed.\n"); uncomp_data_ = uncomp_data_owner_.data(); uncomp_size_ = uncomp_data_owner_.size(); } } void reader::Impl::setRecordStarts() { std::vector<char> chars_to_count{'\n'}; // Currently, ignoring lineterminations within quotes is handled by recording the records of both, // and then filtering out the records that is a quotechar or a linetermination within a quotechar pair. if (allow_newlines_in_strings_) { chars_to_count.push_back('\"'); } // If not starting at an offset, add an extra row to account for the first row in the file const auto prefilter_count = countAllFromSet(uncomp_data_, uncomp_size_, chars_to_count) + ((byte_range_offset_ == 0) ? 
1 : 0); rec_starts_.resize(prefilter_count); auto *find_result_ptr = rec_starts_.data().get(); // Manually adding an extra row to account for the first row in the file if (byte_range_offset_ == 0) { find_result_ptr++; CUDA_TRY(cudaMemsetAsync(rec_starts_.data().get(), 0ull, sizeof(uint64_t))); } std::vector<char> chars_to_find{'\n'}; if (allow_newlines_in_strings_) { chars_to_find.push_back('\"'); } // Passing offset = 1 to return positions AFTER the found character findAllFromSet(uncomp_data_, uncomp_size_, chars_to_find, 1, find_result_ptr); // Previous call stores the record pinput_file.typeositions as encountered by all threads // Sort the record positions as subsequent processing may require filtering // certain rows or other processing on specific records thrust::sort(rmm::exec_policy()->on(0), rec_starts_.begin(), rec_starts_.end()); auto filtered_count = prefilter_count; if (allow_newlines_in_strings_) { thrust::host_vector<uint64_t> h_rec_starts = rec_starts_; bool quotation = false; for (cudf::size_type i = 1; i < prefilter_count; ++i) { if (uncomp_data_[h_rec_starts[i] - 1] == '\"') { quotation = !quotation; h_rec_starts[i] = uncomp_size_; filtered_count--; } else if (quotation) { h_rec_starts[i] = uncomp_size_; filtered_count--; } } rec_starts_ = h_rec_starts; thrust::sort(rmm::exec_policy()->on(0), rec_starts_.begin(), rec_starts_.end()); } // Exclude the ending newline as it does not precede a record start if (uncomp_data_[uncomp_size_ - 1] == '\n') { filtered_count--; } rec_starts_.resize(filtered_count); } void reader::Impl::uploadDataToDevice() { size_t start_offset = 0; size_t end_offset = uncomp_size_; // Trim lines that are outside range if (byte_range_size_ != 0 || byte_range_offset_ != 0) { thrust::host_vector<uint64_t> h_rec_starts = rec_starts_; if (byte_range_size_ != 0) { auto it = h_rec_starts.end() - 1; while (it >= h_rec_starts.begin() && *it > byte_range_size_) { end_offset = *it; --it; } h_rec_starts.erase(it + 1, h_rec_starts.end()); 
} // Resize to exclude rows outside of the range // Adjust row start positions to account for the data subcopy start_offset = h_rec_starts.front(); rec_starts_.resize(h_rec_starts.size()); thrust::transform(rmm::exec_policy()->on(0), rec_starts_.begin(), rec_starts_.end(), thrust::make_constant_iterator(start_offset), rec_starts_.begin(), thrust::minus<uint64_t>()); } const size_t bytes_to_upload = end_offset - start_offset; CUDF_EXPECTS(bytes_to_upload <= uncomp_size_, "Error finding the record within the specified byte range.\n"); // Upload the raw data that is within the rows of interest data_ = rmm::device_buffer(uncomp_data_ + start_offset, bytes_to_upload); } /**---------------------------------------------------------------------------* * @brief Extract value names from a JSON object * * @param[in] json_obj Host vector containing the JSON object * @param[in] opts Parsing options (e.g. delimiter and quotation character) * * @return std::vector<std::string> names of JSON object values *---------------------------------------------------------------------------**/ std::vector<std::string> getNamesFromJsonObject(const std::vector<char> &json_obj, const ParseOptions &opts) { enum class ParseState { preColName, colName, postColName }; std::vector<std::string> names; bool quotation = false; auto state = ParseState::preColName; int name_start = 0; for (size_t pos = 0; pos < json_obj.size(); ++pos) { if (state == ParseState::preColName) { if (json_obj[pos] == opts.quotechar) { name_start = pos + 1; state = ParseState::colName; continue; } } else if (state == ParseState::colName) { if (json_obj[pos] == opts.quotechar && json_obj[pos - 1] != '\\') { // if found a non-escaped quote character, it's the end of the column name names.emplace_back(&json_obj[name_start], &json_obj[pos]); state = ParseState::postColName; continue; } } else if (state == ParseState::postColName) { // TODO handle complex data types that might include unquoted commas if (!quotation && 
json_obj[pos] == opts.delimiter) { state = ParseState::preColName; continue; } else if (json_obj[pos] == opts.quotechar) { quotation = !quotation; } } } return names; } void reader::Impl::setColumnNames() { // If file only contains one row, use the file size for the row size uint64_t first_row_len = data_.size() / sizeof(char); if (rec_starts_.size() > 1) { // Set first_row_len to the offset of the second row, if it exists CUDA_TRY(cudaMemcpyAsync(&first_row_len, rec_starts_.data().get() + 1, sizeof(uint64_t), cudaMemcpyDeviceToHost)); } std::vector<char> first_row(first_row_len); CUDA_TRY(cudaMemcpyAsync(first_row.data(), data_.data(), first_row_len * sizeof(char), cudaMemcpyDeviceToHost)); CUDA_TRY(cudaStreamSynchronize(0)); // Determine the row format between: // JSON array - [val1, val2, ...] and // JSON object - {"col1":val1, "col2":val2, ...} // based on the top level opening bracket const auto first_square_bracket = std::find(first_row.begin(), first_row.end(), '['); const auto first_curly_bracket = std::find(first_row.begin(), first_row.end(), '{'); CUDF_EXPECTS(first_curly_bracket != first_row.end() || first_square_bracket != first_row.end(), "Input data is not a valid JSON file."); // If the first opening bracket is '{', assume object format const bool is_object = first_curly_bracket < first_square_bracket; if (is_object) { column_names_ = getNamesFromJsonObject(first_row, opts_); } else { int cols_found = 0; bool quotation = false; for (size_t pos = 0; pos < first_row.size(); ++pos) { // Flip the quotation flag if current character is a quotechar if (first_row[pos] == opts_.quotechar) { quotation = !quotation; } // Check if end of a column/row else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts_.delimiter)) { column_names_.emplace_back(std::to_string(cols_found++)); } } } } void reader::Impl::convertDataToColumns() { const auto num_columns = dtypes_.size(); for (size_t col = 0; col < num_columns; ++col) { 
columns_.emplace_back(rec_starts_.size(), dtypes_[col], gdf_dtype_extra_info{TIME_UNIT_NONE}, column_names_[col]); columns_.back().allocate(); } thrust::host_vector<gdf_dtype> h_dtypes(num_columns); thrust::host_vector<void *> h_data(num_columns); thrust::host_vector<cudf::valid_type *> h_valid(num_columns); for (size_t i = 0; i < num_columns; ++i) { h_dtypes[i] = columns_[i]->dtype; h_data[i] = columns_[i]->data; h_valid[i] = columns_[i]->valid; } rmm::device_vector<gdf_dtype> d_dtypes = h_dtypes; rmm::device_vector<void *> d_data = h_data; rmm::device_vector<cudf::valid_type *> d_valid = h_valid; rmm::device_vector<cudf::size_type> d_valid_counts(num_columns, 0); convertJsonToColumns(d_dtypes.data().get(), d_data.data().get(), d_valid.data().get(), d_valid_counts.data().get()); CUDA_TRY(cudaDeviceSynchronize()); CUDA_TRY(cudaGetLastError()); thrust::host_vector<cudf::size_type> h_valid_counts = d_valid_counts; for (size_t i = 0; i < num_columns; ++i) { columns_[i]->null_count = columns_[i]->size - h_valid_counts[i]; } // Perform any final column preparation (may reference decoded data) for (auto &column : columns_) { column.finalize(); } } /**---------------------------------------------------------------------------* * @brief Functor for converting plain text data to cuDF data type value. *---------------------------------------------------------------------------**/ struct ConvertFunctor { /**---------------------------------------------------------------------------* * @brief Template specialization for operator() for types whose values can be * convertible to a 0 or 1 to represent false/true. The converting is done by * checking against the default and user-specified true/false values list. * * It is handled here rather than within convertStrToValue() as that function * is used by other types (ex. timestamp) that aren't 'booleable'. 
*---------------------------------------------------------------------------**/ template <typename T, typename std::enable_if_t<std::is_integral<T>::value> * = nullptr> __host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start, long end, const ParseOptions &opts) { T &value{static_cast<T *>(gdf_columns)[row]}; // Check for user-specified true/false values first, where the output is // replaced with 1/0 respectively const size_t field_len = end - start + 1; if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len)) { value = 1; } else if (serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) { value = 0; } else { value = convertStrToValue<T>(data, start, end, opts); } } /**---------------------------------------------------------------------------* * @brief Default template operator() dispatch specialization all data types * (including wrapper types) that is not covered by above. *---------------------------------------------------------------------------**/ template <typename T, typename std::enable_if_t<!std::is_integral<T>::value> * = nullptr> __host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start, long end, const ParseOptions &opts) { T &value{static_cast<T *>(gdf_columns)[row]}; value = convertStrToValue<T>(data, start, end, opts); } }; /**---------------------------------------------------------------------------* * @brief CUDA Kernel that modifies the start and stop offsets to exclude * the sections outside of the top level brackets. * * The top level brackets characters are excluded from the resulting range. 
* Parameter stop has the same semantics as end() in STL containers * (one past the last element) * * @param[in] data Pointer to the device buffer containing the data to process * @param[in,out] start Offset of the first character in the range * @param[in,out] stop Offset of the first character after the range * * @return void *---------------------------------------------------------------------------**/ __device__ void limitRangeToBrackets(const char *data, long &start, long &stop) { while (start < stop && data[start] != '[' && data[start] != '{') { start++; } start++; while (start < stop && data[stop - 1] != ']' && data[stop - 1] != '}') { stop--; } stop--; } /**---------------------------------------------------------------------------* * @brief CUDA kernel that finds the end position of the next field name, * including the colon that separates the name from the field value. * * Returns the position after the colon that preceeds the value token. * * @param[in] data Pointer to the device buffer containing the data to process * @param[in] opts Parsing options (e.g. delimiter and quotation character) * @param[in] start Offset of the first character in the range * @param[in] stop Offset of the first character after the range * * @return long Position of the first character after the field name. *---------------------------------------------------------------------------**/ __device__ long seekFieldNameEnd(const char *data, const ParseOptions opts, long start, long stop) { bool quotation = false; for (auto pos = start; pos < stop; ++pos) { // Ignore escaped quotes if (data[pos] == opts.quotechar && data[pos - 1] != '\\') { quotation = !quotation; } else if (!quotation && data[pos] == ':') { return pos + 1; } } return stop; } /**---------------------------------------------------------------------------* * @brief CUDA kernel that parses and converts plain text data into cuDF column data. 
* * Data is processed one record at a time * * @param[in] data The entire data to read * @param[in] data_size Size of the data buffer, in bytes * @param[in] rec_starts The start of each data record * @param[in] num_records The number of lines/rows * @param[in] dtypes The data type of each column * @param[in] opts A set of parsing options * @param[out] gdf_columns The output column data * @param[in] num_columns The number of columns * @param[out] valid_fields The bitmaps indicating whether column fields are valid * @param[out] num_valid_fields The numbers of valid fields in columns * * @return void *---------------------------------------------------------------------------**/ __global__ void convertJsonToGdf(const char *data, size_t data_size, const uint64_t *rec_starts, cudf::size_type num_records, const gdf_dtype *dtypes, ParseOptions opts, void *const *gdf_columns, int num_columns, cudf::valid_type *const *valid_fields, cudf::size_type *num_valid_fields) { const long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); if (rec_id >= num_records) return; long start = rec_starts[rec_id]; // has the same semantics as end() in STL containers (one past last element) long stop = ((rec_id < num_records - 1) ? 
rec_starts[rec_id + 1] : data_size); limitRangeToBrackets(data, start, stop); const bool is_object = (data[start - 1] == '{'); for (int col = 0; col < num_columns && start < stop; col++) { if (is_object) { start = seekFieldNameEnd(data, opts, start, stop); } // field_end is at the next delimiter/newline const long field_end = seekFieldEnd(data, opts, start, stop); long field_data_last = field_end - 1; // Modify start & end to ignore whitespace and quotechars adjustForWhitespaceAndQuotes(data, &start, &field_data_last, opts.quotechar); // Empty fields are not legal values if (start <= field_data_last && !serializedTrieContains(opts.naValuesTrie, data + start, field_end - start)) { // Type dispatcher does not handle GDF_STRINGS if (dtypes[col] == gdf_dtype::GDF_STRING) { auto str_list = static_cast<string_pair *>(gdf_columns[col]); str_list[rec_id].first = data + start; str_list[rec_id].second = field_data_last - start + 1; } else { cudf::type_dispatcher(dtypes[col], ConvertFunctor{}, data, gdf_columns[col], rec_id, start, field_data_last, opts); } // set the valid bitmap - all bits were set to 0 to start setBitmapBit(valid_fields[col], rec_id); atomicAdd(&num_valid_fields[col], 1); } else if (dtypes[col] == gdf_dtype::GDF_STRING) { auto str_list = static_cast<string_pair *>(gdf_columns[col]); str_list[rec_id].first = nullptr; str_list[rec_id].second = 0; } start = field_end + 1; } } void reader::Impl::convertJsonToColumns(gdf_dtype *const dtypes, void *const *gdf_columns, cudf::valid_type *const *valid_fields, cudf::size_type *num_valid_fields) { int block_size; int min_grid_size; CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, convertJsonToGdf)); const int grid_size = (rec_starts_.size() + block_size - 1) / block_size; convertJsonToGdf <<< grid_size, block_size >>> ( static_cast<char *>(data_.data()), data_.size(), rec_starts_.data().get(), rec_starts_.size(), dtypes, opts_, gdf_columns, columns_.size(), valid_fields, num_valid_fields); 
CUDA_TRY(cudaGetLastError()); } /**---------------------------------------------------------------------------* * @brief CUDA kernel that parses and converts data into cuDF column data. * * Data is processed in one row/record at a time, so the number of total * threads (tid) is equal to the number of rows. * * @param[in] data The entire plain text data to read * @param[in] data_size Size of the data buffer, in bytes * @param[in] opts A set of parsing options * @param[in] num_columns The number of columns of input data * @param[in] rec_starts The start the input data of interest * @param[in] num_records The number of lines/rows of input data * @param[out] column_infos The count for each column data type * * @returns void *---------------------------------------------------------------------------**/ __global__ void detectJsonDataTypes(const char *data, size_t data_size, const ParseOptions opts, int num_columns, const uint64_t *rec_starts, cudf::size_type num_records, ColumnInfo *column_infos) { long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); if (rec_id >= num_records) return; long start = rec_starts[rec_id]; // has the same semantics as end() in STL containers (one past last element) long stop = ((rec_id < num_records - 1) ? 
rec_starts[rec_id + 1] : data_size); limitRangeToBrackets(data, start, stop); const bool is_object = (data[start - 1] == '{'); for (int col = 0; col < num_columns; col++) { if (is_object) { start = seekFieldNameEnd(data, opts, start, stop); } const long field_end = seekFieldEnd(data, opts, start, stop); long field_data_last = field_end - 1; adjustForWhitespaceAndQuotes(data, &start, &field_data_last); const int field_len = field_data_last - start + 1; // Checking if the field is empty if (start > field_data_last || serializedTrieContains(opts.naValuesTrie, data + start, field_len)) { atomicAdd(&column_infos[col].null_count, 1); start = field_end + 1; continue; } int digit_count = 0; int decimal_count = 0; int slash_count = 0; int dash_count = 0; int colon_count = 0; int exponent_count = 0; int other_count = 0; const bool maybe_hex = ((field_len > 2 && data[start] == '0' && data[start + 1] == 'x') || (field_len > 3 && data[start] == '-' && data[start + 1] == '0' && data[start + 2] == 'x')); for (long pos = start; pos <= field_data_last; pos++) { if (isDigit(data[pos], maybe_hex)) { digit_count++; continue; } // Looking for unique characters that will help identify column types switch (data[pos]) { case '.': decimal_count++; break; case '-': dash_count++; break; case '/': slash_count++; break; case ':': colon_count++; break; case 'e': case 'E': if (!maybe_hex && pos > start && pos < field_data_last) exponent_count++; break; default: other_count++; break; } } // Integers have to have the length of the string int int_req_number_cnt = field_len; // Off by one if they start with a minus sign if (data[start] == '-' && field_len > 1) { --int_req_number_cnt; } // Off by one if they are a hexadecimal number if (maybe_hex) { --int_req_number_cnt; } if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len) || serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) { atomicAdd(&column_infos[col].bool_count, 1); } else if (digit_count == 
int_req_number_cnt) { atomicAdd(&column_infos[col].int_count, 1); } else if (isLikeFloat(field_len, digit_count, decimal_count, dash_count, exponent_count)) { atomicAdd(&column_infos[col].float_count, 1); } // A date-time field cannot have more than 3 non-special characters // A number field cannot have more than one decimal point else if (other_count > 3 || decimal_count > 1) { atomicAdd(&column_infos[col].string_count, 1); } else { // A date field can have either one or two '-' or '\'; A legal combination will only have one of them // To simplify the process of auto column detection, we are not covering all the date-time formation permutations if ((dash_count > 0 && dash_count <= 2 && slash_count == 0) || (dash_count == 0 && slash_count > 0 && slash_count <= 2)) { if (colon_count <= 2) { atomicAdd(&column_infos[col].datetime_count, 1); } else { atomicAdd(&column_infos[col].string_count, 1); } } else { // Default field type is string atomicAdd(&column_infos[col].string_count, 1); } } start = field_end + 1; } } void reader::Impl::detectDataTypes(ColumnInfo *column_infos) { int block_size; int min_grid_size; CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, detectJsonDataTypes)); // Calculate actual block count to use based on records count const int grid_size = (rec_starts_.size() + block_size - 1) / block_size; detectJsonDataTypes <<< grid_size, block_size >>> ( static_cast<char *>(data_.data()), data_.size(), opts_, column_names_.size(), rec_starts_.data().get(), rec_starts_.size(), column_infos); CUDA_TRY(cudaGetLastError()); } void reader::Impl::setDataTypes() { if (!args_.dtype.empty()) { CUDF_EXPECTS(args_.dtype.size() == column_names_.size(), "Need to specify the type of each column.\n"); // Assume that the dtype is in dictionary format only if all elements contain a colon const bool is_dict = std::all_of(args_.dtype.begin(), args_.dtype.end(), [](const std::string &s) { return std::find(s.begin(), s.end(), ':') != s.end(); }); if 
(is_dict) { std::map<std::string, gdf_dtype> col_type_map; std::map<std::string, gdf_dtype_extra_info> col_type_info_map; for (const auto &ts : args_.dtype) { const size_t colon_idx = ts.find(":"); const std::string col_name(ts.begin(), ts.begin() + colon_idx); const std::string type_str(ts.begin() + colon_idx + 1, ts.end()); std::tie( col_type_map[col_name], col_type_info_map[col_name] ) = convertStringToDtype(type_str); } // Using the map here allows O(n log n) complexity for (size_t col = 0; col < args_.dtype.size(); ++col) { dtypes_.push_back(col_type_map[column_names_[col]]); dtypes_extra_info_.push_back(col_type_info_map[column_names_[col]]); } } else { auto dtype_ = std::back_inserter(dtypes_); auto dtype_info_ = std::back_inserter(dtypes_extra_info_); for (size_t col = 0; col < args_.dtype.size(); ++col) { std::tie(dtype_, dtype_info_) = convertStringToDtype(args_.dtype[col]); } } } else { CUDF_EXPECTS(rec_starts_.size() != 0, "No data available for data type inference.\n"); const auto num_columns = column_names_.size(); dtypes_extra_info_ = std::vector<gdf_dtype_extra_info>(num_columns, gdf_dtype_extra_info{ TIME_UNIT_NONE }); rmm::device_vector<ColumnInfo> d_column_infos(num_columns, ColumnInfo{}); detectDataTypes(d_column_infos.data().get()); thrust::host_vector<ColumnInfo> h_column_infos = d_column_infos; for (const auto &cinfo : h_column_infos) { if (cinfo.null_count == static_cast<int>(rec_starts_.size())) { // Entire column is NULL; allocate the smallest amount of memory dtypes_.push_back(GDF_INT8); } else if (cinfo.string_count > 0) { dtypes_.push_back(GDF_STRING); } else if (cinfo.datetime_count > 0) { dtypes_.push_back(GDF_DATE64); } else if (cinfo.float_count > 0 || (cinfo.int_count > 0 && cinfo.null_count > 0)) { dtypes_.push_back(GDF_FLOAT64); } else if (cinfo.int_count > 0) { dtypes_.push_back(GDF_INT64); } else if (cinfo.bool_count > 0) { dtypes_.push_back(GDF_BOOL8); } else { CUDF_FAIL("Data type detection failed.\n"); } } } } 
reader::reader(std::string filepath, reader_options const &options) : impl_(std::make_unique<Impl>(nullptr, filepath, options)) { // Delay actual instantiation of data source until read to allow for // partial memory mapping of file using byte ranges } reader::reader(const char *buffer, size_t length, reader_options const &options) : impl_(std::make_unique<Impl>(datasource::create(buffer, length), "", options)) {} reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file, reader_options const &options) : impl_(std::make_unique<Impl>(datasource::create(file), "", options)) {} table reader::read() { return impl_->read(0, 0); } table reader::read_byte_range(size_t offset, size_t size) { return impl_->read(offset, size); } reader::~reader() = default; } // namespace json } // namespace io } // namespace cudf
cd7b3afb69024a18c2560f5042f63a2b0319d424.hip
// !!! This is a file automatically generated by hipify!!! #include "catch.hpp" #include <ckt/include/utility.hpp> #include <ckt/include/cuda_config.hpp> #include <ckt/include/for_each_shm.hpp> #include <ckt/include/for_each.hpp> using namespace ckt; template <class T> class reduce_run_nv: public nvstd::function<void(T)> { public: __device__ reduce_run_nv() { m_reduce = {}; } __device__ void operator()(int gid, thrust::tuple<const T*, T*> &tuple) { m_reduce += (thrust::get<0>(tuple))[gid]; // atomicAdd(thrust::get<0>(tuple), thrust::get<1>(tuple)); } __device__ void post_proc(int gid, thrust::tuple<const T*, T*> &tuple) { // printf("here in post proc\n"); // if (blockIdx.x == 0 & threadIdx.x < 2) // printf("my reduce %d tid %d.\n", m_reduce, threadIdx.x); m_reduce = ckt::cuda::blockReduceSum(m_reduce); if (threadIdx.x == 0) (thrust::get<1>(tuple))[blockIdx.x] = m_reduce; // if (blockIdx.x == 0 & threadIdx.x < 2) // printf("after my reduce %d tid %d.\n", m_reduce, threadIdx.x); } private: T m_reduce; }; TEST_CASE( "ForEachShmReduce", "[sum]" ) { int n = 2000; JVector<int> sum(n); thrust::fill(sum.gbegin(), sum.gend(), 1); // ForEachKernel<StridePolicy, 256, false> fe(300); // AtomicAdd kernel(300); // AtomicAdd/*<decltype(atomic_run)>*/ kernel(/*atomic_run,*/ n1); ForEachShmKernel<BlockPolicy, konst::default_cuda_blocksize, false> kernel(n, "Reduction"); kernel.set_block_size(1024); kernel.set_max_block(1024); JVector<int> inter_sum(1024); constexpr int shared_bsize = sizeof(int)*1024/32; kernel.run<reduce_run_nv<int>, int, shared_bsize, const int *, int *>(sum.getROGpuPtr(), inter_sum.getGpuPtr()); // inter_sum.print("intersum"); kernel.set_N(kernel.get_num_blocks()); kernel.run<reduce_run_nv<int>, int, shared_bsize, const int *, int *>(inter_sum.getROGpuPtr(), inter_sum.getGpuPtr()); hipDeviceSynchronize(); // inter_sum.print("intersum after"); check_cuda_error("inter_sum", __FILE__, __LINE__); REQUIRE(inter_sum.getElementAt(0) == n); // int sum_now = sum[0]; // 
REQUIRE(sum_now == n1*add_per_thread); // kernel.set_N(257); // kernel.run<atomic_run_nv<int>, int *, int >(sum.getGpuPtr(), 12); // REQUIRE(sum[0] == (sum_now + 257*12)); check_cuda_error("inter_sum", __FILE__, __LINE__); } TEST_CASE( "ForEachShmReduceDouble", "[sum]" ) { int n = 2000; JVector<double> sum(n); thrust::fill(sum.gbegin(), sum.gend(), 1); // ForEachKernel<StridePolicy, 256, false> fe(300); // AtomicAdd kernel(300); // AtomicAdd/*<decltype(atomic_run)>*/ kernel(/*atomic_run,*/ n1); ForEachShmKernel<BlockPolicy, CKT_cuda_warpsize, false> kernel(n, "Reduction"); kernel.set_block_size(1024); kernel.set_max_block(1024); JVector<double> inter_sum(1024); constexpr int shared_bsize = sizeof(double)*1024/32; kernel.run<reduce_run_nv<double>, double, shared_bsize, const double *, double *>(sum.getROGpuPtr(), inter_sum.getGpuPtr()); // inter_sum.print("intersum"); kernel.set_N(kernel.get_num_blocks()); kernel.run<reduce_run_nv<double>, double, shared_bsize, const double *, double *>(inter_sum.getROGpuPtr(), inter_sum.getGpuPtr()); hipDeviceSynchronize(); // inter_sum.print("intersum after"); check_cuda_error("inter_sum", __FILE__, __LINE__); REQUIRE(inter_sum.getElementAt(0) == n); // int sum_now = sum[0]; // REQUIRE(sum_now == n1*add_per_thread); // kernel.set_N(257); // kernel.run<atomic_run_nv<int>, int *, int >(sum.getGpuPtr(), 12); // REQUIRE(sum[0] == (sum_now + 257*12)); check_cuda_error("inter_sum", __FILE__, __LINE__); }
cd7b3afb69024a18c2560f5042f63a2b0319d424.cu
#include "catch.hpp" #include <ckt/include/utility.hpp> #include <ckt/include/cuda_config.hpp> #include <ckt/include/for_each_shm.hpp> #include <ckt/include/for_each.hpp> using namespace ckt; template <class T> class reduce_run_nv: public nvstd::function<void(T)> { public: __device__ reduce_run_nv() { m_reduce = {}; } __device__ void operator()(int gid, thrust::tuple<const T*, T*> &tuple) { m_reduce += (thrust::get<0>(tuple))[gid]; // atomicAdd(thrust::get<0>(tuple), thrust::get<1>(tuple)); } __device__ void post_proc(int gid, thrust::tuple<const T*, T*> &tuple) { // printf("here in post proc\n"); // if (blockIdx.x == 0 & threadIdx.x < 2) // printf("my reduce %d tid %d.\n", m_reduce, threadIdx.x); m_reduce = ckt::cuda::blockReduceSum(m_reduce); if (threadIdx.x == 0) (thrust::get<1>(tuple))[blockIdx.x] = m_reduce; // if (blockIdx.x == 0 & threadIdx.x < 2) // printf("after my reduce %d tid %d.\n", m_reduce, threadIdx.x); } private: T m_reduce; }; TEST_CASE( "ForEachShmReduce", "[sum]" ) { int n = 2000; JVector<int> sum(n); thrust::fill(sum.gbegin(), sum.gend(), 1); // ForEachKernel<StridePolicy, 256, false> fe(300); // AtomicAdd kernel(300); // AtomicAdd/*<decltype(atomic_run)>*/ kernel(/*atomic_run,*/ n1); ForEachShmKernel<BlockPolicy, konst::default_cuda_blocksize, false> kernel(n, "Reduction"); kernel.set_block_size(1024); kernel.set_max_block(1024); JVector<int> inter_sum(1024); constexpr int shared_bsize = sizeof(int)*1024/32; kernel.run<reduce_run_nv<int>, int, shared_bsize, const int *, int *>(sum.getROGpuPtr(), inter_sum.getGpuPtr()); // inter_sum.print("intersum"); kernel.set_N(kernel.get_num_blocks()); kernel.run<reduce_run_nv<int>, int, shared_bsize, const int *, int *>(inter_sum.getROGpuPtr(), inter_sum.getGpuPtr()); cudaDeviceSynchronize(); // inter_sum.print("intersum after"); check_cuda_error("inter_sum", __FILE__, __LINE__); REQUIRE(inter_sum.getElementAt(0) == n); // int sum_now = sum[0]; // REQUIRE(sum_now == n1*add_per_thread); // 
kernel.set_N(257); // kernel.run<atomic_run_nv<int>, int *, int >(sum.getGpuPtr(), 12); // REQUIRE(sum[0] == (sum_now + 257*12)); check_cuda_error("inter_sum", __FILE__, __LINE__); } TEST_CASE( "ForEachShmReduceDouble", "[sum]" ) { int n = 2000; JVector<double> sum(n); thrust::fill(sum.gbegin(), sum.gend(), 1); // ForEachKernel<StridePolicy, 256, false> fe(300); // AtomicAdd kernel(300); // AtomicAdd/*<decltype(atomic_run)>*/ kernel(/*atomic_run,*/ n1); ForEachShmKernel<BlockPolicy, CKT_cuda_warpsize, false> kernel(n, "Reduction"); kernel.set_block_size(1024); kernel.set_max_block(1024); JVector<double> inter_sum(1024); constexpr int shared_bsize = sizeof(double)*1024/32; kernel.run<reduce_run_nv<double>, double, shared_bsize, const double *, double *>(sum.getROGpuPtr(), inter_sum.getGpuPtr()); // inter_sum.print("intersum"); kernel.set_N(kernel.get_num_blocks()); kernel.run<reduce_run_nv<double>, double, shared_bsize, const double *, double *>(inter_sum.getROGpuPtr(), inter_sum.getGpuPtr()); cudaDeviceSynchronize(); // inter_sum.print("intersum after"); check_cuda_error("inter_sum", __FILE__, __LINE__); REQUIRE(inter_sum.getElementAt(0) == n); // int sum_now = sum[0]; // REQUIRE(sum_now == n1*add_per_thread); // kernel.set_N(257); // kernel.run<atomic_run_nv<int>, int *, int >(sum.getGpuPtr(), 12); // REQUIRE(sum[0] == (sum_now + 257*12)); check_cuda_error("inter_sum", __FILE__, __LINE__); }
6cdf2d97256095c5bcc0605c19024e202d658bea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** Copyright 2016 Joachim Wolff Master Thesis Tutor: Fabrizio Costa, Milad Miladi Winter semester 2015/2016 Chair of Bioinformatics Department of Computer Science Faculty of Engineering Albert-Ludwigs-University Freiburg im Breisgau **/ #include "nearestNeighborsCuda.h" #include "kernel.h" #include "typeDefinitionsBasic.h" NearestNeighborsCuda::NearestNeighborsCuda() { } NearestNeighborsCuda::~NearestNeighborsCuda() { } neighborhood* NearestNeighborsCuda::computeNearestNeighbors(neighborhood* neighbors, size_t pSimilarity, SparseMatrixFloat* pRawData, SparseMatrixFloat* pOriginalRawData, size_t pMaxNeighbors) { float* precomputedDotProductNeighbor; int* featureIdsNeighbor; float* valuesNeighbor; size_t maxNnzNeighbor; size_t* sizeNeighbor; float* precomputedDotProductInstance; int* featureIdsInstance; float* valuesInstance; size_t maxNnzInstance; size_t* sizeInstance; // transfer data to gpu and precompute the dot product maxNnzNeighbor = pOriginalRawData->getMaxNnz(); hipMalloc((void **) &precomputedDotProductNeighbor, sizeof(float) * pOriginalRawData->size()); hipMalloc((void **) &featureIdsNeighbor, sizeof(int) * pOriginalRawData->size() * pOriginalRawData->getMaxNnz()); hipMalloc((void **) &valuesNeighbor, sizeof(float) * pOriginalRawData->size() * pOriginalRawData->getMaxNnz()); hipMalloc((void **) &sizeNeighbor, sizeof(size_t) * pOriginalRawData->size()); hipMemcpy(featureIdsNeighbor, pOriginalRawData->getSparseMatrixIndex(), pOriginalRawData->size() * pOriginalRawData->getMaxNnz() * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(valuesNeighbor, pOriginalRawData->getSparseMatrixValues(), pOriginalRawData->size() * pOriginalRawData->getMaxNnz() * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(sizeNeighbor, pOriginalRawData->getSparseMatrixSizeOfInstances(), sizeof(size_t) * pOriginalRawData->size(), hipMemcpyHostToDevice); hipDeviceSynchronize(); hipLaunchKernelGGL(( 
dotProductSingle), dim3(1024), dim3(32), 0, 0, featureIdsNeighbor, valuesNeighbor, sizeNeighbor, pOriginalRawData->size(), pOriginalRawData->getMaxNnz(), precomputedDotProductNeighbor); hipDeviceSynchronize(); if (pRawData == NULL) { precomputedDotProductInstance = precomputedDotProductNeighbor; featureIdsInstance = featureIdsNeighbor; valuesInstance = valuesNeighbor; maxNnzInstance = maxNnzNeighbor; sizeInstance = sizeNeighbor; } else { // the query dataset is different from the fitted one // transfer data to gpu and precompute the dot product maxNnzInstance = pRawData->getMaxNnz(); hipMalloc((void **) &precomputedDotProductInstance, sizeof(float) * pRawData->size()); hipMalloc((void **) &featureIdsInstance, sizeof(int) * pRawData->size() * pRawData->getMaxNnz()); hipMalloc((void **) &valuesInstance, sizeof(float) * pRawData->size() * pRawData->getMaxNnz()); hipMalloc((void **) &sizeInstance, sizeof(size_t) * pRawData->size()); hipMemcpy(featureIdsInstance, pRawData->getSparseMatrixIndex(), pRawData->size() * pRawData->getMaxNnz() * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(valuesInstance, pRawData->getSparseMatrixValues(), pRawData->size() * pRawData->getMaxNnz() * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(sizeInstance, pRawData->getSparseMatrixSizeOfInstances(), sizeof(size_t) * pRawData->size(), hipMemcpyHostToDevice); hipDeviceSynchronize(); hipLaunchKernelGGL(( dotProductSingle), dim3(1024), dim3(32), 0, 0, featureIdsInstance, valuesInstance, sizeInstance, pRawData->size(), pRawData->getMaxNnz(), precomputedDotProductInstance); hipDeviceSynchronize(); } // compute jump lenghts for list of neighbor candidates. 
// transfer data to gpu and create space for the euclidean distance/cosine similarity computation // --> float3* dotProducts size_t* jumpLengthList = (size_t*) malloc(neighbors->neighbors->size() * sizeof(size_t)); size_t count = 0; size_t* candidatesSize = (size_t*) malloc(neighbors->neighbors->size() * sizeof(size_t)); for (size_t i = 0; i < neighbors->neighbors->size(); ++i) { jumpLengthList[i] = count; count += neighbors->neighbors->operator[](i).size(); candidatesSize[i] = neighbors->neighbors->operator[](i).size(); } float3* dotProducts; hipMalloc((void **) &dotProducts, sizeof(float3) * count); int* candidates = (int*) malloc(count * sizeof(int)); for (size_t i = 0; i < neighbors->neighbors->size(); ++i) { for (size_t j = 0; j < neighbors->neighbors->operator[](i).size(); ++j) { candidates[jumpLengthList[i]+j] = neighbors->neighbors->operator[](i)[j]; } } int* candidatesCuda; hipMalloc((void **) &candidatesCuda, count * sizeof(int)); hipMemcpy(candidatesCuda, candidates, count * sizeof(int), hipMemcpyHostToDevice); size_t* jumpLengthListCuda; hipMalloc((void **) &jumpLengthListCuda, neighbors->neighbors->size() * sizeof(size_t)); hipMemcpy(jumpLengthListCuda, jumpLengthList, neighbors->neighbors->size() * sizeof(size_t), hipMemcpyHostToDevice); size_t* candidatesSizeCuda; hipMalloc((void **) &candidatesSizeCuda, neighbors->neighbors->size() * sizeof(size_t)); hipMemcpy(candidatesSizeCuda, candidatesSize, neighbors->neighbors->size() * sizeof(size_t), hipMemcpyHostToDevice); // compute all dot products for all candidates with their specific query instance. 
// The base dataset is called 'neighbors' the query instances are 'instance' hipLaunchKernelGGL(( computeDotProducts), dim3(1024), dim3(32), 0, 0, dotProducts, count, candidatesCuda, jumpLengthListCuda, candidatesSizeCuda, neighbors->neighbors->size(), featureIdsNeighbor, valuesNeighbor, maxNnzNeighbor, sizeNeighbor, featureIdsInstance, valuesInstance, maxNnzInstance, sizeInstance, precomputedDotProductNeighbor, precomputedDotProductInstance); hipDeviceSynchronize(); float* resultsCuda; hipMalloc((void **) &resultsCuda, sizeof(float) * count); // compute euclidean distance or cosine similarity if (pSimilarity) { hipLaunchKernelGGL(( cosineSimilarityCuda), dim3(1024), dim3(32), 0, 0, dotProducts, count, resultsCuda); } else { hipLaunchKernelGGL(( euclideanDistanceCuda), dim3(1024), dim3(32), 0, 0, dotProducts, count, resultsCuda); } // copy data back and sort float* results = (float*) malloc( sizeof(float) * count); hipMemcpy(results, resultsCuda, sizeof(float) * count, hipMemcpyDeviceToHost); // return results neighborhood* neighbors_ = new neighborhood();; neighbors_->neighbors = new vvsize_t(neighbors->neighbors->size()); neighbors_->distances = new vvfloat(neighbors->neighbors->size()); for (size_t i = 0; i < neighbors->neighbors->size(); ++i) { std::vector<sortMapFloat> returnValue(neighbors->neighbors->operator[](i).size()); for (size_t j = 0; j < neighbors->neighbors->operator[](i).size(); ++j) { sortMapFloat element; element.key = neighbors->neighbors->operator[](i)[j]; element.val = results[jumpLengthList[i]+j]; returnValue[j] = element; } size_t vectorSize = ::min(returnValue.size(), pMaxNeighbors); if (pSimilarity) { std::partial_sort(returnValue.begin(), returnValue.begin()+vectorSize, returnValue.end(), mapSortDescByValueFloat); } else { std::partial_sort(returnValue.begin(), returnValue.begin()+vectorSize, returnValue.end(), mapSortAscByValueFloat); } std::vector<size_t> neighborsVector(vectorSize); std::vector<float> distancesVector(vectorSize); if 
(vectorSize == 0) { neighborsVector.push_back(i); distancesVector.push_back(0.0); } for (size_t j = 0; j < vectorSize; ++j) { neighborsVector[j] = returnValue[j].key; distancesVector[j] = returnValue[j].val; } neighbors_->neighbors->operator[](i) = neighborsVector; neighbors_->distances->operator[](i) = distancesVector; } hipFree(dotProducts); hipFree(candidatesCuda); hipFree(jumpLengthListCuda); hipFree(candidatesSizeCuda); hipFree(featureIdsNeighbor); hipFree(valuesNeighbor); hipFree(sizeNeighbor); hipFree(precomputedDotProductNeighbor); if (pRawData != NULL) { hipFree(featureIdsInstance); hipFree(valuesInstance); hipFree(sizeInstance); hipFree(precomputedDotProductInstance); } free(jumpLengthList); free(candidates); free(candidatesSize); free(results); hipDeviceSynchronize(); return neighbors_; }
6cdf2d97256095c5bcc0605c19024e202d658bea.cu
/** Copyright 2016 Joachim Wolff Master Thesis Tutor: Fabrizio Costa, Milad Miladi Winter semester 2015/2016 Chair of Bioinformatics Department of Computer Science Faculty of Engineering Albert-Ludwigs-University Freiburg im Breisgau **/ #include "nearestNeighborsCuda.h" #include "kernel.h" #include "typeDefinitionsBasic.h" NearestNeighborsCuda::NearestNeighborsCuda() { } NearestNeighborsCuda::~NearestNeighborsCuda() { } neighborhood* NearestNeighborsCuda::computeNearestNeighbors(neighborhood* neighbors, size_t pSimilarity, SparseMatrixFloat* pRawData, SparseMatrixFloat* pOriginalRawData, size_t pMaxNeighbors) { float* precomputedDotProductNeighbor; int* featureIdsNeighbor; float* valuesNeighbor; size_t maxNnzNeighbor; size_t* sizeNeighbor; float* precomputedDotProductInstance; int* featureIdsInstance; float* valuesInstance; size_t maxNnzInstance; size_t* sizeInstance; // transfer data to gpu and precompute the dot product maxNnzNeighbor = pOriginalRawData->getMaxNnz(); cudaMalloc((void **) &precomputedDotProductNeighbor, sizeof(float) * pOriginalRawData->size()); cudaMalloc((void **) &featureIdsNeighbor, sizeof(int) * pOriginalRawData->size() * pOriginalRawData->getMaxNnz()); cudaMalloc((void **) &valuesNeighbor, sizeof(float) * pOriginalRawData->size() * pOriginalRawData->getMaxNnz()); cudaMalloc((void **) &sizeNeighbor, sizeof(size_t) * pOriginalRawData->size()); cudaMemcpy(featureIdsNeighbor, pOriginalRawData->getSparseMatrixIndex(), pOriginalRawData->size() * pOriginalRawData->getMaxNnz() * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(valuesNeighbor, pOriginalRawData->getSparseMatrixValues(), pOriginalRawData->size() * pOriginalRawData->getMaxNnz() * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(sizeNeighbor, pOriginalRawData->getSparseMatrixSizeOfInstances(), sizeof(size_t) * pOriginalRawData->size(), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); dotProductSingle<<<1024, 32>>>(featureIdsNeighbor, valuesNeighbor, sizeNeighbor, 
pOriginalRawData->size(), pOriginalRawData->getMaxNnz(), precomputedDotProductNeighbor); cudaDeviceSynchronize(); if (pRawData == NULL) { precomputedDotProductInstance = precomputedDotProductNeighbor; featureIdsInstance = featureIdsNeighbor; valuesInstance = valuesNeighbor; maxNnzInstance = maxNnzNeighbor; sizeInstance = sizeNeighbor; } else { // the query dataset is different from the fitted one // transfer data to gpu and precompute the dot product maxNnzInstance = pRawData->getMaxNnz(); cudaMalloc((void **) &precomputedDotProductInstance, sizeof(float) * pRawData->size()); cudaMalloc((void **) &featureIdsInstance, sizeof(int) * pRawData->size() * pRawData->getMaxNnz()); cudaMalloc((void **) &valuesInstance, sizeof(float) * pRawData->size() * pRawData->getMaxNnz()); cudaMalloc((void **) &sizeInstance, sizeof(size_t) * pRawData->size()); cudaMemcpy(featureIdsInstance, pRawData->getSparseMatrixIndex(), pRawData->size() * pRawData->getMaxNnz() * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(valuesInstance, pRawData->getSparseMatrixValues(), pRawData->size() * pRawData->getMaxNnz() * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(sizeInstance, pRawData->getSparseMatrixSizeOfInstances(), sizeof(size_t) * pRawData->size(), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); dotProductSingle<<<1024, 32>>>(featureIdsInstance, valuesInstance, sizeInstance, pRawData->size(), pRawData->getMaxNnz(), precomputedDotProductInstance); cudaDeviceSynchronize(); } // compute jump lenghts for list of neighbor candidates. 
// transfer data to gpu and create space for the euclidean distance/cosine similarity computation // --> float3* dotProducts size_t* jumpLengthList = (size_t*) malloc(neighbors->neighbors->size() * sizeof(size_t)); size_t count = 0; size_t* candidatesSize = (size_t*) malloc(neighbors->neighbors->size() * sizeof(size_t)); for (size_t i = 0; i < neighbors->neighbors->size(); ++i) { jumpLengthList[i] = count; count += neighbors->neighbors->operator[](i).size(); candidatesSize[i] = neighbors->neighbors->operator[](i).size(); } float3* dotProducts; cudaMalloc((void **) &dotProducts, sizeof(float3) * count); int* candidates = (int*) malloc(count * sizeof(int)); for (size_t i = 0; i < neighbors->neighbors->size(); ++i) { for (size_t j = 0; j < neighbors->neighbors->operator[](i).size(); ++j) { candidates[jumpLengthList[i]+j] = neighbors->neighbors->operator[](i)[j]; } } int* candidatesCuda; cudaMalloc((void **) &candidatesCuda, count * sizeof(int)); cudaMemcpy(candidatesCuda, candidates, count * sizeof(int), cudaMemcpyHostToDevice); size_t* jumpLengthListCuda; cudaMalloc((void **) &jumpLengthListCuda, neighbors->neighbors->size() * sizeof(size_t)); cudaMemcpy(jumpLengthListCuda, jumpLengthList, neighbors->neighbors->size() * sizeof(size_t), cudaMemcpyHostToDevice); size_t* candidatesSizeCuda; cudaMalloc((void **) &candidatesSizeCuda, neighbors->neighbors->size() * sizeof(size_t)); cudaMemcpy(candidatesSizeCuda, candidatesSize, neighbors->neighbors->size() * sizeof(size_t), cudaMemcpyHostToDevice); // compute all dot products for all candidates with their specific query instance. 
// The base dataset is called 'neighbors' the query instances are 'instance' computeDotProducts<<<1024, 32>>>(dotProducts, count, candidatesCuda, jumpLengthListCuda, candidatesSizeCuda, neighbors->neighbors->size(), featureIdsNeighbor, valuesNeighbor, maxNnzNeighbor, sizeNeighbor, featureIdsInstance, valuesInstance, maxNnzInstance, sizeInstance, precomputedDotProductNeighbor, precomputedDotProductInstance); cudaDeviceSynchronize(); float* resultsCuda; cudaMalloc((void **) &resultsCuda, sizeof(float) * count); // compute euclidean distance or cosine similarity if (pSimilarity) { cosineSimilarityCuda<<<1024, 32>>>(dotProducts, count, resultsCuda); } else { euclideanDistanceCuda<<<1024, 32>>>(dotProducts, count, resultsCuda); } // copy data back and sort float* results = (float*) malloc( sizeof(float) * count); cudaMemcpy(results, resultsCuda, sizeof(float) * count, cudaMemcpyDeviceToHost); // return results neighborhood* neighbors_ = new neighborhood();; neighbors_->neighbors = new vvsize_t(neighbors->neighbors->size()); neighbors_->distances = new vvfloat(neighbors->neighbors->size()); for (size_t i = 0; i < neighbors->neighbors->size(); ++i) { std::vector<sortMapFloat> returnValue(neighbors->neighbors->operator[](i).size()); for (size_t j = 0; j < neighbors->neighbors->operator[](i).size(); ++j) { sortMapFloat element; element.key = neighbors->neighbors->operator[](i)[j]; element.val = results[jumpLengthList[i]+j]; returnValue[j] = element; } size_t vectorSize = std::min(returnValue.size(), pMaxNeighbors); if (pSimilarity) { std::partial_sort(returnValue.begin(), returnValue.begin()+vectorSize, returnValue.end(), mapSortDescByValueFloat); } else { std::partial_sort(returnValue.begin(), returnValue.begin()+vectorSize, returnValue.end(), mapSortAscByValueFloat); } std::vector<size_t> neighborsVector(vectorSize); std::vector<float> distancesVector(vectorSize); if (vectorSize == 0) { neighborsVector.push_back(i); distancesVector.push_back(0.0); } for (size_t j = 0; j < 
vectorSize; ++j) { neighborsVector[j] = returnValue[j].key; distancesVector[j] = returnValue[j].val; } neighbors_->neighbors->operator[](i) = neighborsVector; neighbors_->distances->operator[](i) = distancesVector; } cudaFree(dotProducts); cudaFree(candidatesCuda); cudaFree(jumpLengthListCuda); cudaFree(candidatesSizeCuda); cudaFree(featureIdsNeighbor); cudaFree(valuesNeighbor); cudaFree(sizeNeighbor); cudaFree(precomputedDotProductNeighbor); if (pRawData != NULL) { cudaFree(featureIdsInstance); cudaFree(valuesInstance); cudaFree(sizeInstance); cudaFree(precomputedDotProductInstance); } free(jumpLengthList); free(candidates); free(candidatesSize); free(results); cudaDeviceSynchronize(); return neighbors_; }