hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
ab9eb267955036c34ae017058016b870bd584fbe.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "relu_h.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *X = NULL; hipMalloc(&X, XSIZE*YSIZE); float *Y = NULL; hipMalloc(&Y, XSIZE*YSIZE); int size_in = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( relu_h), dim3(gridBlock),dim3(threadBlock), 0, 0, X,Y,size_in); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( relu_h), dim3(gridBlock),dim3(threadBlock), 0, 0, X,Y,size_in); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( relu_h), dim3(gridBlock),dim3(threadBlock), 0, 0, X,Y,size_in); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ab9eb267955036c34ae017058016b870bd584fbe.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "relu_h.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *X = NULL; cudaMalloc(&X, XSIZE*YSIZE); float *Y = NULL; cudaMalloc(&Y, XSIZE*YSIZE); int size_in = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); relu_h<<<gridBlock,threadBlock>>>(X,Y,size_in); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { relu_h<<<gridBlock,threadBlock>>>(X,Y,size_in); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { relu_h<<<gridBlock,threadBlock>>>(X,Y,size_in); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f4e8df0dad609add681f8dcb44f2512c04a1d528.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* % Function: receiver % Inputs: modulated_subframe - modulated subframe (modulated symbols) % M_pusch_rb - number of resource blocks assigned to the UE % Outputs: received_bits - received bits By: Mohammed Mostafa */ #include "sc_fdma_demodulator.cuh" #include "generate_dmrs_pusch_hip.cuh" #include "generate_ul_rs.cuh" #include "generate_psuedo_random_seq.cuh" #include "transform_predecoder.cuh" #include "decompose_subframe.cuh" #include "demapper.cuh" #include "descrambler.cuh" #include "deinterleaver_hip.cuh" #include "channel_estimation.cuh" #include "channel_equalization_zf.cuh" #include <chrono> #define timerInit(); std::chrono::steady_clock::time_point start; std::chrono::steady_clock::time_point end; #define startTimer(); start = std::chrono::steady_clock::now(); #define stopTimer(msg); end = std::chrono::steady_clock::now(); printf(msg, std::chrono::duration_cast<std::chrono::nanoseconds> (end - start).count()/1000000.0) ; //__global__ void test(hipfftComplex* equalized_subframe_d) //{ // int x_idx = 0; // // // //} int main(int argc, char **argv) { //input hipfftComplex* subframe_h = (hipfftComplex *)malloc(sizeof(hipfftComplex)*modulated_subframe_length); for (int i = 0; i < modulated_subframe_length; i++) { subframe_h[i].x = rand() / (float)RAND_MAX * 0.1; subframe_h[i].y = rand() / (float)RAND_MAX * 0.1; } //For timing purpose timerInit(); startTimer(); const int Qm = 6; // Modulation Order(2 = QPSK, 4 = 16QAM, 6 = 64QAM) const int M_pusch_rb = 100; //number of resource blocks assigned to the UE const int N_l = 1; // Number of Layers const int N_ri = 0; //length of ri symbols const int n_s = 0; //assume UE send on subframe 0 const int N_id_cell = 2; //assume enodeB scheduled cell 2 for the UE const int M_pusch_sc = N_sc_rb * M_pusch_rb; //total number of subcarriers const int n_RNTI = 10; //radio network temporary identifier given to the UE by enodeB (assume 10) 
const int N_bits = Qm * 12 * M_pusch_sc; //Qm * 12 * M_pusch_sc = 2*12*1200 //Copy (c) to Device //Generate Pseudo Random Seq. Byte *c_h = 0; Byte* c_d = 0; hipMalloc((void **)&c_d, sizeof(Byte)*Qm * 12 * M_pusch_sc); hipfftComplex* subframe_d; hipMalloc((void **)&subframe_d, sizeof(hipfftComplex)*modulated_subframe_length); double sum = 0, sum2 = 0; for (int i = 0; i < 1000; i++) { startTimer(); hipMemcpy(subframe_d, subframe_h, sizeof(hipfftComplex)*modulated_subframe_length, hipMemcpyHostToDevice); end = std::chrono::steady_clock::now(); sum2 += (double)std::chrono::duration_cast<std::chrono::nanoseconds> (end - start).count() / 1000000.0; //roctxRangePop(); } printf("avg Time including prs = %lf\n", sum2 / 1000.0); startTimer(); //Device data allocation hipfftComplex* fft_vec_d; hipfftComplex* demod_subframe_d; hipMalloc((void **)&fft_vec_d, sizeof(hipfftComplex)*N_symbs_per_subframe*FFT_size); hipMalloc((void **)&demod_subframe_d, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc); hipfftComplex* x_q_d; hipfftComplex* dmrs1_generated_d = 0, *dmrs2_generated_d = 0; hipMalloc((void **)&dmrs1_generated_d, sizeof(hipfftComplex)*N_sc_rb*M_pusch_rb); hipMalloc((void **)&dmrs2_generated_d, sizeof(hipfftComplex)*N_sc_rb*M_pusch_rb); hipMalloc((void **)&x_q_d, sizeof(hipfftComplex)*prime_nums[M_pusch_rb - 1]); hipfftComplex* dmrs1_decomposed_d; hipfftComplex* dmrs2_decomposed_d; hipfftComplex* complex_data_d; hipMalloc((void **)&complex_data_d, sizeof(hipfftComplex) * 12 * M_pusch_sc); hipMalloc((void **)&dmrs1_decomposed_d, sizeof(hipfftComplex)*M_pusch_sc); hipMalloc((void **)&dmrs2_decomposed_d, sizeof(hipfftComplex)*M_pusch_sc); // Channel estimation and equaliuzation allocation hipfftComplex* channel, *equalized_subframe_d; hipMalloc((void **)&channel, sizeof(hipfftComplex)*M_pusch_sc); hipMalloc((void **)&equalized_subframe_d, sizeof(hipfftComplex)* 12 * M_pusch_sc); //hipfftComplex* channel_h = (hipfftComplex*)malloc(sizeof(hipfftComplex)*M_pusch_sc); 
//hipfftComplex* dmrs1_generated_h = (hipfftComplex*)malloc(sizeof(hipfftComplex)*M_pusch_sc); //hipfftComplex* dmrs2_generated_h = (hipfftComplex*)malloc(sizeof(hipfftComplex)*M_pusch_sc); //hipfftComplex* equalizer_input_h = (hipfftComplex*)malloc(sizeof(hipfftComplex) * 12 *M_pusch_sc); //hipfftComplex* equalized_subframe_h = (hipfftComplex*)malloc(sizeof(hipfftComplex) * 12 * M_pusch_sc); hipfftComplex* predecoded_data_d; hipMalloc((void **)&predecoded_data_d, sizeof(hipfftComplex)* 12 * M_pusch_sc); Byte *bits_d; hipMalloc((void **)&bits_d, sizeof(Byte)* Qm * 12 * M_pusch_sc); //FIX Number_demaped_bits Byte *descrambled_bits_d; hipMalloc((void **)&descrambled_bits_d, sizeof(Byte)* Qm * 12 * M_pusch_sc); // Step 1: Define C_mux int C_mux = N_pusch_symbs; // Step 2: Define R_mux and R_prime_mux int H_prime_total = N_bits / (Qm*N_l); int H_prime = H_prime_total - N_ri; int R_mux = (H_prime_total*Qm*N_l) / C_mux; int R_prime_mux = R_mux / (Qm*N_l); Byte *ri_d, *y_idx_d, *y_mat_d; Byte *received_bits_d; hipMalloc((void **)&ri_d, sizeof(Byte)*(N_ri * Qm * N_l)); hipMalloc((void **)&y_idx_d, sizeof(Byte)*(C_mux*R_prime_mux)); hipMalloc((void **)&y_mat_d, sizeof(Byte)*(C_mux*R_mux)); hipMalloc((void **)&received_bits_d, sizeof(Byte)* H_prime * Qm * N_l); //Byte* dempapped_h = (Byte*)malloc(sizeof(Byte)*H_prime * Qm * N_l); Byte* received_bits_h = (Byte*)malloc(sizeof(Byte)*H_prime * Qm * N_l); Byte* ri_h = (Byte*)malloc(sizeof(Byte)*N_ri * Qm * N_l); stopTimer("Allocation Time= %.6f ms\n"); startTimer(); //create plans int n[1] = { FFT_size }; hipfftHandle plan_sc_fdma; hipfftPlanMany(&plan_sc_fdma, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, HIPFFT_C2C, N_symbs_per_subframe); int N_SIGS = 12; //signal_size/M_pusch_sc = 12 * M_pusch_sc / M_pusch_sc = 12 n[0] = { M_pusch_sc }; hipfftHandle plan_transform_predecoder; hipfftPlanMany(&plan_transform_predecoder, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, HIPFFT_C2C, N_SIGS); stopTimer("Time of plan creation= %.6f 
ms\n"); for (int i = 0; i < 1000; i++) { startTimer(); generate_psuedo_random_seq(&c_h, N_bits, n_RNTI, n_s, N_id_cell); hipMemcpyAsync(c_d, c_h, sizeof(Byte)*N_bits, hipMemcpyHostToDevice); //sc-fdma demodulation sc_fdma_demodulator(subframe_d, M_pusch_rb, &demod_subframe_d, plan_sc_fdma, fft_vec_d); //generate dmrs generate_dmrs_pusch(n_s, N_id_cell, 0, 0, 0, 0, 0, "fixed", M_pusch_rb, 0, &dmrs1_generated_d, &dmrs2_generated_d, x_q_d); //Decompose subframe decompose_subframe(demod_subframe_d, M_pusch_rb, &complex_data_d, &dmrs1_decomposed_d, &dmrs2_decomposed_d); //hipMemcpy(equalizer_input_h, complex_data_d, sizeof(hipfftComplex)*12* M_pusch_sc, hipMemcpyDeviceToHost); //hipMemcpy(dmrs1_generated_h, dmrs1_decomposed_d, sizeof(hipfftComplex)* M_pusch_sc, hipMemcpyDeviceToHost); //hipMemcpy(dmrs2_generated_h, dmrs2_decomposed_d, sizeof(hipfftComplex)* M_pusch_sc, hipMemcpyDeviceToHost); //Channel estimation channe_estimation(dmrs1_decomposed_d, dmrs2_decomposed_d, dmrs1_generated_d, dmrs2_generated_d, M_pusch_sc, &channel); //hipMemcpy(channel_h, channel, sizeof(hipfftComplex)* M_pusch_sc, hipMemcpyDeviceToHost); //Equalization ZF channel_equalization_zf(complex_data_d, M_pusch_sc , channel, &equalized_subframe_d); //test << < 1, 1 >> > (equalized_subframe_d); //hipMemcpy(equalized_subframe_h, equalized_subframe_d, sizeof(hipfftComplex)* 12 *M_pusch_sc, hipMemcpyDeviceToHost); //predecoding //transform_predecoder(equalized_subframe_d, &predecoded_data_d, plan_transform_predecoder); //signal_size = 12 * M_pusch_sc transform_predecoder(equalized_subframe_d , M_pusch_rb, 12 * M_pusch_sc, &predecoded_data_d, plan_transform_predecoder); //hipMemcpy(equalized_subframe_h, predecoded_data_d, sizeof(hipfftComplex) * 12 * M_pusch_sc, hipMemcpyDeviceToHost); //demapping //demapper(predecoded_data_d, M_pusch_rb, &bits_d, Qm * 12 * M_pusch_sc, Qm); //Number_demaped_bits = Qm * 12 * M_pusch_sc demapper(predecoded_data_d, &bits_d, Qm * 12 * M_pusch_sc, Qm); 
//hipMemcpy(dempapped_h, bits_d, sizeof(Byte)*H_prime * Qm * N_l, hipMemcpyDeviceToHost); //Descrammpling descrambler(bits_d, &descrambled_bits_d, c_d, N_bits); //deinterleaver deinterleaver(descrambled_bits_d, &ri_d, &received_bits_d, N_bits, N_ri, Qm, N_l, y_idx_d, y_mat_d); //Retrieve data from device hipMemcpy(received_bits_h, received_bits_d, sizeof(Byte)*H_prime * Qm * N_l, hipMemcpyDeviceToHost); hipMemcpy(ri_h, ri_d, sizeof(Byte)*N_ri * Qm * N_l, hipMemcpyDeviceToHost); end = std::chrono::steady_clock::now(); sum += (double)std::chrono::duration_cast<std::chrono::nanoseconds> (end - start).count() / 1000000.0; //roctxRangePop(); } printf("avg Time = %lf\n", sum / 1000.0); /* //Print results /*for (int i = 0; i < H_prime * Qm * N_l; i++) { printf("idx = %d \t %d \n", i + 1, received_bits_h[i]); } /* //test file FILE *results1; if ((results1 = freopen("Receiver_test.m", "w+", stdout)) == NULL) { printf("Cannot open file.\n"); exit(1); } //input subframe printf("clear; clc;\nsymbols_in_real = [ "); for (int i = 0; i < (modulated_subframe_length); i++) { printf("%10f", subframe_h[i].x); if (i != ((modulated_subframe_length)-1)) printf(","); } printf(" ];\nsymbols_in_imag = [ "); for (int i = 0; i < (modulated_subframe_length); i++) { printf("%10f", subframe_h[i].y); if (i != ((modulated_subframe_length)-1)) printf(","); } printf(" ];\n"); printf("subframe_input_CUDA = symbols_in_real + 1i * symbols_in_imag;\n"); //// Channel estimation //printf("x = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", channel_h[i].x); //} //printf(" ]; "); //printf("\n"); //printf("y = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", channel_h[i].y); //} //printf(" ];\n "); //printf("channel_cuda = x + 1i * y;\n"); //// dmrs //printf("x = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", dmrs1_generated_h[i].x); //} //printf(" ]; "); //printf("\n"); //printf("y = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", 
dmrs1_generated_h[i].y); //} //printf(" ];\n "); //printf("dmrs_1_cuda = x + 1i * y;\n"); //printf("x = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", dmrs2_generated_h[i].x); //} //printf(" ]; "); //printf("\n"); //printf("y = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", dmrs2_generated_h[i].y); //} //printf(" ];\n "); //printf("dmrs_2_cuda = x + 1i * y;\n"); ////decomposed subframe //printf("x = [ "); //for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++) //{ // printf("%f ", equalizer_input_h[i].x); //} //printf(" ]; "); //printf("\n"); //printf("y = [ "); //for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++) //{ // printf("%f ", equalizer_input_h[i].y); //} //printf(" ];\n "); //printf("equalizer_input_h = x + 1i * y;\n"); //// channel equalization //printf( "x = [ "); //for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++) //{ // printf("%10f ", equalized_subframe_h[i].x); //} //printf( " ]; "); //printf( "\n"); //printf( "y = [ "); //for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++) //{ // printf("%10f ", equalized_subframe_h[i].y); //} //printf( " ];\n "); //printf("predecoded_subframe_h = x + 1i * y;\n"); ////dempapped ////Received Bits //printf("\ndemapped_bits_cuda = [ "); //for (int i = 0; i < (H_prime * Qm * N_l); i++) //{ // printf("%d", dempapped_h[i]); // if (i != ((Qm * 12 * M_pusch_sc) - 1)) // printf(","); //} //printf(" ];\n"); //Received Bits printf("\nReceved_bits_cuda = [ "); for (int i = 0; i < (H_prime * Qm * N_l); i++) { printf("%d", received_bits_h[i]); if (i != ((Qm * 12 * M_pusch_sc)-1)) printf(","); } printf(" ];\n"); //RI Bits printf("\nRI_bits_cuda = [ "); for (int i = 0; i < (N_ri * Qm * N_l); i++) { printf("%d", ri_h[i]); if (i != ((N_ri * Qm * N_l) - 1)) printf(","); } printf(" ];\n"); printf("N_id_cell = 2;N_sc_rb = 12;M_pusch_rb = 100;M_pusch_sc = M_pusch_rb*N_sc_rb;Nc = 1600;n_s = 0;n_RNTI = 10;M_bits = 86400;N_l = 1;\nN_ri_bits = 
0;N_ack_bits =0;Q_m = 6;\nmodulated_subframe = subframe_input_CUDA;\ndemodulated_subframe = sc_fdma_demodulator(modulated_subframe, M_pusch_rb);\ndemodulated_subframe_vect =[demodulated_subframe(0+1,:), demodulated_subframe(1+1,:), demodulated_subframe(2+1,:), demodulated_subframe(4+1,:), demodulated_subframe(5+1,:), demodulated_subframe(6+1,:), demodulated_subframe(7+1,:), demodulated_subframe(8+1,:), demodulated_subframe(9+1,:), demodulated_subframe(11+1,:), demodulated_subframe(12+1,:), demodulated_subframe(13+1,:)];\ndmrs = generate_dmrs_pusch(n_s, N_id_cell, 0, 0, 0, 0, 0, 'fixed', M_pusch_rb, 0);\ndmrs_1 = dmrs(1:M_pusch_sc);\ndmrs_2 = dmrs(M_pusch_sc+1:2*M_pusch_sc);\ndmrs_1_rx = demodulated_subframe(1+3,:);\ndmrs_2_rx = demodulated_subframe(1+10,:);\nchannel = estimate_channel_ls(dmrs_1_rx, dmrs_2_rx, dmrs_1, dmrs_2, M_pusch_sc, 7);\nequalised_subframe = equalise_channel_zf(demodulated_subframe_vect, channel);\npredecoded_data = transform_predecoder(equalised_subframe, M_pusch_rb);\n demapped_data = demapper_hard(predecoded_data, '64qam');\n c_init = n_RNTI * 2 ^ 14 + floor(n_s / 2) * 2 ^ 9 + N_id_cell;\n c = generate_psuedo_random_seq(c_init, M_bits);\n descrambled_bits = descramble(demapped_data, c);\n [data_bits, ri_bits, ack_bits] = channel_deinterleaver(descrambled_bits, N_ri_bits, N_ack_bits, N_l, Q_m); \nisequal(data_bits, Receved_bits_cuda)\nisequal(ri_bits, RI_bits_cuda)\n"); fclose(results1); return 0; */ }
f4e8df0dad609add681f8dcb44f2512c04a1d528.cu
/* % Function: receiver % Inputs: modulated_subframe - modulated subframe (modulated symbols) % M_pusch_rb - number of resource blocks assigned to the UE % Outputs: received_bits - received bits By: Mohammed Mostafa */ #include "sc_fdma_demodulator.cuh" #include "generate_dmrs_pusch.cuh" #include "generate_ul_rs.cuh" #include "generate_psuedo_random_seq.cuh" #include "transform_predecoder.cuh" #include "decompose_subframe.cuh" #include "demapper.cuh" #include "descrambler.cuh" #include "deinterleaver.cuh" #include "channel_estimation.cuh" #include "channel_equalization_zf.cuh" #include <chrono> #define timerInit(); std::chrono::steady_clock::time_point start; std::chrono::steady_clock::time_point end; #define startTimer(); start = std::chrono::steady_clock::now(); #define stopTimer(msg); end = std::chrono::steady_clock::now(); printf(msg, std::chrono::duration_cast<std::chrono::nanoseconds> (end - start).count()/1000000.0) ; //__global__ void test(cufftComplex* equalized_subframe_d) //{ // int x_idx = 0; // // // //} int main(int argc, char **argv) { //input cufftComplex* subframe_h = (cufftComplex *)malloc(sizeof(cufftComplex)*modulated_subframe_length); for (int i = 0; i < modulated_subframe_length; i++) { subframe_h[i].x = rand() / (float)RAND_MAX * 0.1; subframe_h[i].y = rand() / (float)RAND_MAX * 0.1; } //For timing purpose timerInit(); startTimer(); const int Qm = 6; // Modulation Order(2 = QPSK, 4 = 16QAM, 6 = 64QAM) const int M_pusch_rb = 100; //number of resource blocks assigned to the UE const int N_l = 1; // Number of Layers const int N_ri = 0; //length of ri symbols const int n_s = 0; //assume UE send on subframe 0 const int N_id_cell = 2; //assume enodeB scheduled cell 2 for the UE const int M_pusch_sc = N_sc_rb * M_pusch_rb; //total number of subcarriers const int n_RNTI = 10; //radio network temporary identifier given to the UE by enodeB (assume 10) const int N_bits = Qm * 12 * M_pusch_sc; //Qm * 12 * M_pusch_sc = 2*12*1200 //Copy (c) to Device 
//Generate Pseudo Random Seq. Byte *c_h = 0; Byte* c_d = 0; cudaMalloc((void **)&c_d, sizeof(Byte)*Qm * 12 * M_pusch_sc); cufftComplex* subframe_d; cudaMalloc((void **)&subframe_d, sizeof(cufftComplex)*modulated_subframe_length); double sum = 0, sum2 = 0; for (int i = 0; i < 1000; i++) { startTimer(); cudaMemcpy(subframe_d, subframe_h, sizeof(cufftComplex)*modulated_subframe_length, cudaMemcpyHostToDevice); end = std::chrono::steady_clock::now(); sum2 += (double)std::chrono::duration_cast<std::chrono::nanoseconds> (end - start).count() / 1000000.0; //nvtxRangePop(); } printf("avg Time including prs = %lf\n", sum2 / 1000.0); startTimer(); //Device data allocation cufftComplex* fft_vec_d; cufftComplex* demod_subframe_d; cudaMalloc((void **)&fft_vec_d, sizeof(cufftComplex)*N_symbs_per_subframe*FFT_size); cudaMalloc((void **)&demod_subframe_d, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc); cufftComplex* x_q_d; cufftComplex* dmrs1_generated_d = 0, *dmrs2_generated_d = 0; cudaMalloc((void **)&dmrs1_generated_d, sizeof(cufftComplex)*N_sc_rb*M_pusch_rb); cudaMalloc((void **)&dmrs2_generated_d, sizeof(cufftComplex)*N_sc_rb*M_pusch_rb); cudaMalloc((void **)&x_q_d, sizeof(cufftComplex)*prime_nums[M_pusch_rb - 1]); cufftComplex* dmrs1_decomposed_d; cufftComplex* dmrs2_decomposed_d; cufftComplex* complex_data_d; cudaMalloc((void **)&complex_data_d, sizeof(cufftComplex) * 12 * M_pusch_sc); cudaMalloc((void **)&dmrs1_decomposed_d, sizeof(cufftComplex)*M_pusch_sc); cudaMalloc((void **)&dmrs2_decomposed_d, sizeof(cufftComplex)*M_pusch_sc); // Channel estimation and equaliuzation allocation cufftComplex* channel, *equalized_subframe_d; cudaMalloc((void **)&channel, sizeof(cufftComplex)*M_pusch_sc); cudaMalloc((void **)&equalized_subframe_d, sizeof(cufftComplex)* 12 * M_pusch_sc); //cufftComplex* channel_h = (cufftComplex*)malloc(sizeof(cufftComplex)*M_pusch_sc); //cufftComplex* dmrs1_generated_h = (cufftComplex*)malloc(sizeof(cufftComplex)*M_pusch_sc); //cufftComplex* 
dmrs2_generated_h = (cufftComplex*)malloc(sizeof(cufftComplex)*M_pusch_sc); //cufftComplex* equalizer_input_h = (cufftComplex*)malloc(sizeof(cufftComplex) * 12 *M_pusch_sc); //cufftComplex* equalized_subframe_h = (cufftComplex*)malloc(sizeof(cufftComplex) * 12 * M_pusch_sc); cufftComplex* predecoded_data_d; cudaMalloc((void **)&predecoded_data_d, sizeof(cufftComplex)* 12 * M_pusch_sc); Byte *bits_d; cudaMalloc((void **)&bits_d, sizeof(Byte)* Qm * 12 * M_pusch_sc); //FIX Number_demaped_bits Byte *descrambled_bits_d; cudaMalloc((void **)&descrambled_bits_d, sizeof(Byte)* Qm * 12 * M_pusch_sc); // Step 1: Define C_mux int C_mux = N_pusch_symbs; // Step 2: Define R_mux and R_prime_mux int H_prime_total = N_bits / (Qm*N_l); int H_prime = H_prime_total - N_ri; int R_mux = (H_prime_total*Qm*N_l) / C_mux; int R_prime_mux = R_mux / (Qm*N_l); Byte *ri_d, *y_idx_d, *y_mat_d; Byte *received_bits_d; cudaMalloc((void **)&ri_d, sizeof(Byte)*(N_ri * Qm * N_l)); cudaMalloc((void **)&y_idx_d, sizeof(Byte)*(C_mux*R_prime_mux)); cudaMalloc((void **)&y_mat_d, sizeof(Byte)*(C_mux*R_mux)); cudaMalloc((void **)&received_bits_d, sizeof(Byte)* H_prime * Qm * N_l); //Byte* dempapped_h = (Byte*)malloc(sizeof(Byte)*H_prime * Qm * N_l); Byte* received_bits_h = (Byte*)malloc(sizeof(Byte)*H_prime * Qm * N_l); Byte* ri_h = (Byte*)malloc(sizeof(Byte)*N_ri * Qm * N_l); stopTimer("Allocation Time= %.6f ms\n"); startTimer(); //create plans int n[1] = { FFT_size }; cufftHandle plan_sc_fdma; cufftPlanMany(&plan_sc_fdma, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, CUFFT_C2C, N_symbs_per_subframe); int N_SIGS = 12; //signal_size/M_pusch_sc = 12 * M_pusch_sc / M_pusch_sc = 12 n[0] = { M_pusch_sc }; cufftHandle plan_transform_predecoder; cufftPlanMany(&plan_transform_predecoder, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, CUFFT_C2C, N_SIGS); stopTimer("Time of plan creation= %.6f ms\n"); for (int i = 0; i < 1000; i++) { startTimer(); generate_psuedo_random_seq(&c_h, N_bits, n_RNTI, n_s, N_id_cell); 
cudaMemcpyAsync(c_d, c_h, sizeof(Byte)*N_bits, cudaMemcpyHostToDevice); //sc-fdma demodulation sc_fdma_demodulator(subframe_d, M_pusch_rb, &demod_subframe_d, plan_sc_fdma, fft_vec_d); //generate dmrs generate_dmrs_pusch(n_s, N_id_cell, 0, 0, 0, 0, 0, "fixed", M_pusch_rb, 0, &dmrs1_generated_d, &dmrs2_generated_d, x_q_d); //Decompose subframe decompose_subframe(demod_subframe_d, M_pusch_rb, &complex_data_d, &dmrs1_decomposed_d, &dmrs2_decomposed_d); //cudaMemcpy(equalizer_input_h, complex_data_d, sizeof(cufftComplex)*12* M_pusch_sc, cudaMemcpyDeviceToHost); //cudaMemcpy(dmrs1_generated_h, dmrs1_decomposed_d, sizeof(cufftComplex)* M_pusch_sc, cudaMemcpyDeviceToHost); //cudaMemcpy(dmrs2_generated_h, dmrs2_decomposed_d, sizeof(cufftComplex)* M_pusch_sc, cudaMemcpyDeviceToHost); //Channel estimation channe_estimation(dmrs1_decomposed_d, dmrs2_decomposed_d, dmrs1_generated_d, dmrs2_generated_d, M_pusch_sc, &channel); //cudaMemcpy(channel_h, channel, sizeof(cufftComplex)* M_pusch_sc, cudaMemcpyDeviceToHost); //Equalization ZF channel_equalization_zf(complex_data_d, M_pusch_sc , channel, &equalized_subframe_d); //test << < 1, 1 >> > (equalized_subframe_d); //cudaMemcpy(equalized_subframe_h, equalized_subframe_d, sizeof(cufftComplex)* 12 *M_pusch_sc, cudaMemcpyDeviceToHost); //predecoding //transform_predecoder(equalized_subframe_d, &predecoded_data_d, plan_transform_predecoder); //signal_size = 12 * M_pusch_sc transform_predecoder(equalized_subframe_d , M_pusch_rb, 12 * M_pusch_sc, &predecoded_data_d, plan_transform_predecoder); //cudaMemcpy(equalized_subframe_h, predecoded_data_d, sizeof(cufftComplex) * 12 * M_pusch_sc, cudaMemcpyDeviceToHost); //demapping //demapper(predecoded_data_d, M_pusch_rb, &bits_d, Qm * 12 * M_pusch_sc, Qm); //Number_demaped_bits = Qm * 12 * M_pusch_sc demapper(predecoded_data_d, &bits_d, Qm * 12 * M_pusch_sc, Qm); //cudaMemcpy(dempapped_h, bits_d, sizeof(Byte)*H_prime * Qm * N_l, cudaMemcpyDeviceToHost); //Descrammpling descrambler(bits_d, 
&descrambled_bits_d, c_d, N_bits); //deinterleaver deinterleaver(descrambled_bits_d, &ri_d, &received_bits_d, N_bits, N_ri, Qm, N_l, y_idx_d, y_mat_d); //Retrieve data from device cudaMemcpy(received_bits_h, received_bits_d, sizeof(Byte)*H_prime * Qm * N_l, cudaMemcpyDeviceToHost); cudaMemcpy(ri_h, ri_d, sizeof(Byte)*N_ri * Qm * N_l, cudaMemcpyDeviceToHost); end = std::chrono::steady_clock::now(); sum += (double)std::chrono::duration_cast<std::chrono::nanoseconds> (end - start).count() / 1000000.0; //nvtxRangePop(); } printf("avg Time = %lf\n", sum / 1000.0); /* //Print results /*for (int i = 0; i < H_prime * Qm * N_l; i++) { printf("idx = %d \t %d \n", i + 1, received_bits_h[i]); } /* //test file FILE *results1; if ((results1 = freopen("Receiver_test.m", "w+", stdout)) == NULL) { printf("Cannot open file.\n"); exit(1); } //input subframe printf("clear; clc;\nsymbols_in_real = [ "); for (int i = 0; i < (modulated_subframe_length); i++) { printf("%10f", subframe_h[i].x); if (i != ((modulated_subframe_length)-1)) printf(","); } printf(" ];\nsymbols_in_imag = [ "); for (int i = 0; i < (modulated_subframe_length); i++) { printf("%10f", subframe_h[i].y); if (i != ((modulated_subframe_length)-1)) printf(","); } printf(" ];\n"); printf("subframe_input_CUDA = symbols_in_real + 1i * symbols_in_imag;\n"); //// Channel estimation //printf("x = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", channel_h[i].x); //} //printf(" ]; "); //printf("\n"); //printf("y = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", channel_h[i].y); //} //printf(" ];\n "); //printf("channel_cuda = x + 1i * y;\n"); //// dmrs //printf("x = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", dmrs1_generated_h[i].x); //} //printf(" ]; "); //printf("\n"); //printf("y = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", dmrs1_generated_h[i].y); //} //printf(" ];\n "); //printf("dmrs_1_cuda = x + 1i * y;\n"); //printf("x = [ "); //for (int i = 0; 
i < M_pusch_sc; i++) //{ // printf("%f ", dmrs2_generated_h[i].x); //} //printf(" ]; "); //printf("\n"); //printf("y = [ "); //for (int i = 0; i < M_pusch_sc; i++) //{ // printf("%f ", dmrs2_generated_h[i].y); //} //printf(" ];\n "); //printf("dmrs_2_cuda = x + 1i * y;\n"); ////decomposed subframe //printf("x = [ "); //for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++) //{ // printf("%f ", equalizer_input_h[i].x); //} //printf(" ]; "); //printf("\n"); //printf("y = [ "); //for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++) //{ // printf("%f ", equalizer_input_h[i].y); //} //printf(" ];\n "); //printf("equalizer_input_h = x + 1i * y;\n"); //// channel equalization //printf( "x = [ "); //for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++) //{ // printf("%10f ", equalized_subframe_h[i].x); //} //printf( " ]; "); //printf( "\n"); //printf( "y = [ "); //for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++) //{ // printf("%10f ", equalized_subframe_h[i].y); //} //printf( " ];\n "); //printf("predecoded_subframe_h = x + 1i * y;\n"); ////dempapped ////Received Bits //printf("\ndemapped_bits_cuda = [ "); //for (int i = 0; i < (H_prime * Qm * N_l); i++) //{ // printf("%d", dempapped_h[i]); // if (i != ((Qm * 12 * M_pusch_sc) - 1)) // printf(","); //} //printf(" ];\n"); //Received Bits printf("\nReceved_bits_cuda = [ "); for (int i = 0; i < (H_prime * Qm * N_l); i++) { printf("%d", received_bits_h[i]); if (i != ((Qm * 12 * M_pusch_sc)-1)) printf(","); } printf(" ];\n"); //RI Bits printf("\nRI_bits_cuda = [ "); for (int i = 0; i < (N_ri * Qm * N_l); i++) { printf("%d", ri_h[i]); if (i != ((N_ri * Qm * N_l) - 1)) printf(","); } printf(" ];\n"); printf("N_id_cell = 2;N_sc_rb = 12;M_pusch_rb = 100;M_pusch_sc = M_pusch_rb*N_sc_rb;Nc = 1600;n_s = 0;n_RNTI = 10;M_bits = 86400;N_l = 1;\nN_ri_bits = 0;N_ack_bits =0;Q_m = 6;\nmodulated_subframe = subframe_input_CUDA;\ndemodulated_subframe = sc_fdma_demodulator(modulated_subframe, 
M_pusch_rb);\ndemodulated_subframe_vect =[demodulated_subframe(0+1,:), demodulated_subframe(1+1,:), demodulated_subframe(2+1,:), demodulated_subframe(4+1,:), demodulated_subframe(5+1,:), demodulated_subframe(6+1,:), demodulated_subframe(7+1,:), demodulated_subframe(8+1,:), demodulated_subframe(9+1,:), demodulated_subframe(11+1,:), demodulated_subframe(12+1,:), demodulated_subframe(13+1,:)];\ndmrs = generate_dmrs_pusch(n_s, N_id_cell, 0, 0, 0, 0, 0, 'fixed', M_pusch_rb, 0);\ndmrs_1 = dmrs(1:M_pusch_sc);\ndmrs_2 = dmrs(M_pusch_sc+1:2*M_pusch_sc);\ndmrs_1_rx = demodulated_subframe(1+3,:);\ndmrs_2_rx = demodulated_subframe(1+10,:);\nchannel = estimate_channel_ls(dmrs_1_rx, dmrs_2_rx, dmrs_1, dmrs_2, M_pusch_sc, 7);\nequalised_subframe = equalise_channel_zf(demodulated_subframe_vect, channel);\npredecoded_data = transform_predecoder(equalised_subframe, M_pusch_rb);\n demapped_data = demapper_hard(predecoded_data, '64qam');\n c_init = n_RNTI * 2 ^ 14 + floor(n_s / 2) * 2 ^ 9 + N_id_cell;\n c = generate_psuedo_random_seq(c_init, M_bits);\n descrambled_bits = descramble(demapped_data, c);\n [data_bits, ri_bits, ack_bits] = channel_deinterleaver(descrambled_bits, N_ri_bits, N_ack_bits, N_l, Q_m); \nisequal(data_bits, Receved_bits_cuda)\nisequal(ri_bits, RI_bits_cuda)\n"); fclose(results1); return 0; */ }
f33306b306ca89cacba68d7e7114896aa3c74309.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cl_device_assist.cuh" #include "cl_interface_shared.h" __global__ void clcuda_func_while_loop( int32_t *var_A, int32_t var_a, CommonKernelData data ) { if (blockIdx.x * blockDim.x + threadIdx.x >= data.totalX) return; if (blockIdx.y * blockDim.y + threadIdx.y >= data.totalY) return; if (blockIdx.z * blockDim.z + threadIdx.z >= data.totalZ) return; size_t var_i = clcuda_builtin_get_global_id(0, data); while (var_A[var_i] < var_a) { var_A[var_i] *= 2; } } KERNEL_LAUNCHER void clcuda_launcher_while_loop( struct _cl_kernel *desc, float *elapsedMs ) { dim3 num_grids = dim3(desc->gridX, desc->gridY, desc->gridZ); dim3 local_size = dim3(desc->localX, desc->localY, desc->localZ); hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start); hipLaunchKernelGGL(( clcuda_func_while_loop), dim3(num_grids), dim3(local_size), 0, 0, (int32_t*) desc->arg_data[0], *(int32_t*) desc->arg_data[1], CommonKernelData(desc->totalX, desc->totalY, desc->totalZ) ); hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(elapsedMs, start, end); }
f33306b306ca89cacba68d7e7114896aa3c74309.cu
#include "cl_device_assist.cuh" #include "cl_interface_shared.h" __global__ void clcuda_func_while_loop( int32_t *var_A, int32_t var_a, CommonKernelData data ) { if (blockIdx.x * blockDim.x + threadIdx.x >= data.totalX) return; if (blockIdx.y * blockDim.y + threadIdx.y >= data.totalY) return; if (blockIdx.z * blockDim.z + threadIdx.z >= data.totalZ) return; size_t var_i = clcuda_builtin_get_global_id(0, data); while (var_A[var_i] < var_a) { var_A[var_i] *= 2; } } KERNEL_LAUNCHER void clcuda_launcher_while_loop( struct _cl_kernel *desc, float *elapsedMs ) { dim3 num_grids = dim3(desc->gridX, desc->gridY, desc->gridZ); dim3 local_size = dim3(desc->localX, desc->localY, desc->localZ); cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start); clcuda_func_while_loop<<<num_grids, local_size>>>( (int32_t*) desc->arg_data[0], *(int32_t*) desc->arg_data[1], CommonKernelData(desc->totalX, desc->totalY, desc->totalZ) ); cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(elapsedMs, start, end); }
686b01404ff090dd33517896fb5235f96c7b91a1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "sieveOfEratosthenes.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; char *primes = NULL; hipMalloc(&primes, XSIZE*YSIZE); uint64_t max = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( sieveOfEratosthenes), dim3(gridBlock),dim3(threadBlock), 0, 0, primes,max); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( sieveOfEratosthenes), dim3(gridBlock),dim3(threadBlock), 0, 0, primes,max); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( sieveOfEratosthenes), dim3(gridBlock),dim3(threadBlock), 0, 0, primes,max); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
686b01404ff090dd33517896fb5235f96c7b91a1.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "sieveOfEratosthenes.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; char *primes = NULL; cudaMalloc(&primes, XSIZE*YSIZE); uint64_t max = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); sieveOfEratosthenes<<<gridBlock,threadBlock>>>(primes,max); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { sieveOfEratosthenes<<<gridBlock,threadBlock>>>(primes,max); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { sieveOfEratosthenes<<<gridBlock,threadBlock>>>(primes,max); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f1e9c84fd8ed7184961cb3297201b6afe17ac261.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #define NUM_THREADS 1024 #define EPSILON 0.0001 //#define EPSILON 0.00001 //--> sometimes fails from too small error #define height 256 #define width 10 #define UPPER 0.01 #define LOWER -0.01 // KERNEL: x*A = B __global__ void MatMul(float* x, float* A, float* B) { // index into flattened weights matrix int i = blockDim.x * blockIdx.x + threadIdx.x; // index into the input vector int row = i / width; // index into the output vector int col = i % width; //__shared__ float local_output[]; // zero out resultant vector B if (i < width) B[i] = 0.0; __syncthreads(); if ((i < height * width) && (row < height)) { // TODO: atomicAdd to local, shared output vectors --> atomicAdd to global atomicAdd(&B[col], x[row] * A[i]); __syncthreads(); if (i < width) { // SOFTMAX CALCULATION __shared__ float sum; // 1. store the exp() of each output value __shared__ float exp_vector[width]; exp_vector[i] = expf(B[i]); // 2. calculate the sum of all the exponent values // --> width < BLOCK_SIZE, so this will only be in the first block if (threadIdx.x == 0) sum = 0.0; __syncthreads(); // wait for sum to ve zeroed atomicAdd(&sum, exp_vector[i]); __syncthreads(); // 3. 
store new output value B[i] = exp_vector[i] / sum; } } } // HOST int main(int argc, char** argv) { // Variables float *h_x, *h_A, *h_B, *d_x, *d_A, *d_B; //int height = 256; //int width = 100; // Allocate vectors and matrices in host memory and device memory h_x = (float*)malloc(height*sizeof(float)); h_A = (float*)malloc(height*width*sizeof(float)); h_B = (float*)malloc(width*sizeof(float)); hipMalloc((void**)&d_x, height*sizeof(float)); hipMalloc((void**)&d_A, height*width*sizeof(float)); hipMalloc((void**)&d_B, width*sizeof(float)); // Initialize input vector x for (int i = 0; i < height; ++i) { //h_x[i] = (rand() / (float)RAND_MAX)*(UPPER - LOWER) + LOWER; h_x[i] = rand() / (float)RAND_MAX - 0.5; //printf("h_x[%i]: %f\n", i, h_x[i]); } // Initialize input matrix A for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { // initialize weights matrix values to be between LOWER and UPPER h_A[i*width + j] = (rand() / (float)RAND_MAX)*(UPPER - LOWER) + LOWER; } } // Copy vectors from host memory to device memory hipMemcpy(d_x, h_x, height*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_A, h_A, height*width*sizeof(float), hipMemcpyHostToDevice); // FILL IN KERNEL SETUP AND INVOCATION int blocks = (height*width) / NUM_THREADS; if ((height*width) % NUM_THREADS != 0) blocks++; hipLaunchKernelGGL(( MatMul) , dim3(blocks), dim3(NUM_THREADS) , 0, 0, d_x, d_A, d_B); hipDeviceSynchronize(); // Copy result from device memory to host memory hipMemcpy(h_B, d_B, width*sizeof(float), hipMemcpyDeviceToHost); bool correct = true; // Calculate solution on the host and compare float* result = (float*)malloc(width*sizeof(float)); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { // zero out result elements if (i == 0) result[j] = 0.0; result[j] += h_x[i] * h_A[i*width + j]; } } // 1. 
calculate sum float sum = 0.0; float exp_vec[width]; for (int j = 0; j < width; j++) { //exp_vec[j] = (float)exp((double)result[j]); exp_vec[j] = expf(result[j]); // sum up the exp value just calculated sum += exp_vec[j]; //printf("result[%i]: %f\n", j, result[j]); } //printf("-->result sum: %f\n", sum); float r_sum = 0.0; float b_sum = 0.0; for (int j = 0; j < width; j++) { result[j] = exp_vec[j] / sum; r_sum += result[j]; b_sum += h_B[j]; if (fabs(h_B[j] - result[j]) > EPSILON) { printf("ERROR: expected h_B[%i] = %f but received %f\n", j, result[j], h_B[j]); correct = false; //break; } else { printf("result[j]: %f\th_B[j]: %f\n", result[j], h_B[j]); } } printf("-->result sum: %f\n", r_sum); printf("-->h_B sum: %f\n", b_sum); if (correct) printf("\n---PASSED---\n"); else printf("\n---FAILED---\n"); // Free host and device memory hipFree(d_x); hipFree(d_A); hipFree(d_B); free(h_x); free(h_A); free(h_B); free(result); }
f1e9c84fd8ed7184961cb3297201b6afe17ac261.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda.h> #define NUM_THREADS 1024 #define EPSILON 0.0001 //#define EPSILON 0.00001 //--> sometimes fails from too small error #define height 256 #define width 10 #define UPPER 0.01 #define LOWER -0.01 // KERNEL: x*A = B __global__ void MatMul(float* x, float* A, float* B) { // index into flattened weights matrix int i = blockDim.x * blockIdx.x + threadIdx.x; // index into the input vector int row = i / width; // index into the output vector int col = i % width; //__shared__ float local_output[]; // zero out resultant vector B if (i < width) B[i] = 0.0; __syncthreads(); if ((i < height * width) && (row < height)) { // TODO: atomicAdd to local, shared output vectors --> atomicAdd to global atomicAdd(&B[col], x[row] * A[i]); __syncthreads(); if (i < width) { // SOFTMAX CALCULATION __shared__ float sum; // 1. store the exp() of each output value __shared__ float exp_vector[width]; exp_vector[i] = expf(B[i]); // 2. calculate the sum of all the exponent values // --> width < BLOCK_SIZE, so this will only be in the first block if (threadIdx.x == 0) sum = 0.0; __syncthreads(); // wait for sum to ve zeroed atomicAdd(&sum, exp_vector[i]); __syncthreads(); // 3. 
store new output value B[i] = exp_vector[i] / sum; } } } // HOST int main(int argc, char** argv) { // Variables float *h_x, *h_A, *h_B, *d_x, *d_A, *d_B; //int height = 256; //int width = 100; // Allocate vectors and matrices in host memory and device memory h_x = (float*)malloc(height*sizeof(float)); h_A = (float*)malloc(height*width*sizeof(float)); h_B = (float*)malloc(width*sizeof(float)); cudaMalloc((void**)&d_x, height*sizeof(float)); cudaMalloc((void**)&d_A, height*width*sizeof(float)); cudaMalloc((void**)&d_B, width*sizeof(float)); // Initialize input vector x for (int i = 0; i < height; ++i) { //h_x[i] = (rand() / (float)RAND_MAX)*(UPPER - LOWER) + LOWER; h_x[i] = rand() / (float)RAND_MAX - 0.5; //printf("h_x[%i]: %f\n", i, h_x[i]); } // Initialize input matrix A for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { // initialize weights matrix values to be between LOWER and UPPER h_A[i*width + j] = (rand() / (float)RAND_MAX)*(UPPER - LOWER) + LOWER; } } // Copy vectors from host memory to device memory cudaMemcpy(d_x, h_x, height*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_A, h_A, height*width*sizeof(float), cudaMemcpyHostToDevice); // FILL IN KERNEL SETUP AND INVOCATION int blocks = (height*width) / NUM_THREADS; if ((height*width) % NUM_THREADS != 0) blocks++; MatMul <<< blocks, NUM_THREADS >>> (d_x, d_A, d_B); cudaDeviceSynchronize(); // Copy result from device memory to host memory cudaMemcpy(h_B, d_B, width*sizeof(float), cudaMemcpyDeviceToHost); bool correct = true; // Calculate solution on the host and compare float* result = (float*)malloc(width*sizeof(float)); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { // zero out result elements if (i == 0) result[j] = 0.0; result[j] += h_x[i] * h_A[i*width + j]; } } // 1. 
calculate sum float sum = 0.0; float exp_vec[width]; for (int j = 0; j < width; j++) { //exp_vec[j] = (float)exp((double)result[j]); exp_vec[j] = expf(result[j]); // sum up the exp value just calculated sum += exp_vec[j]; //printf("result[%i]: %f\n", j, result[j]); } //printf("-->result sum: %f\n", sum); float r_sum = 0.0; float b_sum = 0.0; for (int j = 0; j < width; j++) { result[j] = exp_vec[j] / sum; r_sum += result[j]; b_sum += h_B[j]; if (fabs(h_B[j] - result[j]) > EPSILON) { printf("ERROR: expected h_B[%i] = %f but received %f\n", j, result[j], h_B[j]); correct = false; //break; } else { printf("result[j]: %f\th_B[j]: %f\n", result[j], h_B[j]); } } printf("-->result sum: %f\n", r_sum); printf("-->h_B sum: %f\n", b_sum); if (correct) printf("\n---PASSED---\n"); else printf("\n---FAILED---\n"); // Free host and device memory cudaFree(d_x); cudaFree(d_A); cudaFree(d_B); free(h_x); free(h_A); free(h_B); free(result); }
8521131b2c307ad9d6a0bb1c074de4fe6c36f754.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace sd { /////////////////////////////////////////////////////////////////////// template<typename T> __device__ void concatKernel(int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, Nd4jLong *zTadShape, Nd4jLong *zOffsets) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int zRank = shape::rank(resultShapeInfo); auto result = reinterpret_cast<T*>(vz); auto dataT = reinterpret_cast<T **>(data); auto shapeInfoPointers = reinterpret_cast<Nd4jLong **>(inputShapeInfos); auto tadShapes = reinterpret_cast<Nd4jLong **>(tadPointers); auto tadOffsets = reinterpret_cast<Nd4jLong **>(offsetPointers); //if (threadIdx.x == 0 && blockIdx.x == 0) { // shape::printShapeInfoLinear("zTadShape", zTadShape); //} //__shared__ int tDim[1]; __shared__ int baseIdx; __shared__ int yLength; __shared__ char yOrder; __shared__ int yEWS; char zOrder = 
shape::order(resultShapeInfo); int zEWS = shape::elementWiseStride(resultShapeInfo); int tadEWS = shape::elementWiseStride(zTadShape); int zLength = shape::length(resultShapeInfo); __shared__ int arrOffset; __shared__ int numTads; if (shape::isVector(resultShapeInfo)) { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Vector here\n"); if (zEWS >= 1) { for (int r = blockIdx.x; r < numArrays; r += gridDim.x) { if(shape::isVector(shapeInfoPointers[r]) || shape::order(shapeInfoPointers[r]) == shape::order(resultShapeInfo)) { yLength = shape::length(shapeInfoPointers[r]); yEWS = shape::elementWiseStride(shapeInfoPointers[r]); // FIXME: this is bad __shared__ int baseIdx; if (threadIdx.x == 0) { baseIdx = 0; for (int f = 0; f < r; f++) { baseIdx += shape::length(shapeInfoPointers[f]); } } __syncthreads(); for (int i = threadIdx.x; i < yLength && baseIdx + i < zLength; i += blockDim.x) { result[baseIdx + i * zEWS] = dataT[r][i * yEWS]; } __syncthreads(); } else { if (tid == 0) printf("Non-matched order for vector\n"); } } } else { if (tid == 0) printf("Vector Non-1 zEWS\n"); } return; } bool _vec = shape::isVector(resultShapeInfo); // TODO: to be pulled into separate kernel. 
matrix concatenation for (int r = 0; r < numArrays; r ++) { auto currentShape = shapeInfoPointers[r]; auto currentData = dataT[r]; auto currentTad = tadShapes[r]; auto currentOffsets = tadOffsets[r]; if (threadIdx.x == 0) { yLength = shape::length(currentTad); yOrder = shape::order(currentTad); yEWS = shape::elementWiseStride(currentTad); numTads = shape::length(currentShape) / yLength; arrOffset = 0; for (int f = 0; f < r; f++) { arrOffset += shape::length(tadShapes[f]); } //if (threadIdx.x == 0 && blockIdx.x == 0) { // shape::printShapeInfoLinear("currentTad", currentTad); //} } __syncthreads(); if (yLength == 1 && _vec) { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Branch 0\n"); // edge case, each thread will handle it's own tad then for (int j = tid; j < numTads; j += blockDim.x * gridDim.x) { Nd4jLong inputOffset = currentOffsets[j]; Nd4jLong resultOffset = zOffsets[j]; T *dataTAD = currentData + inputOffset; T *resultTAD = result + resultOffset; int sub[MAX_RANK]; shape::index2coords(arrOffset, zTadShape, sub); Nd4jLong baseOffset = shape::getOffset(zTadShape, sub); resultTAD += baseOffset; auto yRank = shape::rank(currentTad); auto tadRank = shape::rank(zTadShape); shape::index2coords(0, currentTad, sub); auto yOffset = shape::getOffset(currentTad, sub); resultOffset = shape::getOffset(zTadShape, sub); resultTAD[resultOffset] = dataTAD[yOffset]; } } else { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Branch 1\n"); for (int j = blockIdx.x; j < numTads; j += gridDim.x) { auto inputOffset = currentOffsets[j]; auto resultOffset = zOffsets[j]; auto dataTAD = currentData + inputOffset; auto resultTAD = result + resultOffset; int sub[MAX_RANK]; shape::index2coords(arrOffset, zTadShape, sub); Nd4jLong baseOffset = shape::getOffset(zTadShape, sub); resultTAD += baseOffset; if (zOrder == yOrder && yEWS > 0 && tadEWS > 0) { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Branch A\n"); for (int i = threadIdx.x; i < yLength; i += blockDim.x) { 
resultTAD[i * tadEWS] = dataTAD[i * yEWS]; } } else { if(tadEWS > 0 && shape::order(resultShapeInfo) == shape::order(currentTad)) { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Branch B\n"); if (threadIdx.x == 0) { baseIdx = 0; for (int f = 0; f < r; f++) { baseIdx += shape::length(shapeInfoPointers[f]); } //printf("R: %i; baseIdx: %i;\n", baseIdx); } __syncthreads(); if (numTads == 1) { for(int k = threadIdx.x; k < yLength; k+= blockDim.x) { resultTAD[baseIdx + k * tadEWS] = dataTAD[k]; } } else { int yIdx[MAX_RANK]; auto yRank = shape::rank(currentTad); for (int i = threadIdx.x; i < yLength; i+= blockDim.x) { shape::index2coords(i, currentTad, yIdx); auto yOffset = shape::getOffset(currentTad, yIdx); resultTAD[baseIdx + i * tadEWS] = dataTAD[yOffset]; } } __syncthreads(); } else { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Branch C; yLength: %i;\n", yLength); int zIdx[MAX_RANK]; int yIdx[MAX_RANK]; auto yRank = shape::rank(currentTad); auto tadRank = shape::rank(zTadShape); for (int i = threadIdx.x; i < yLength; i+= blockDim.x) { shape::index2coords(i, currentTad, yIdx); shape::index2coords(i, zTadShape, zIdx); auto yOffset = shape::getOffset(currentTad, yIdx); auto resultOffset = shape::getOffset(zTadShape, zIdx); resultTAD[resultOffset] = dataTAD[yOffset]; } } } __syncthreads(); } } __syncthreads(); } } /////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execConcatKernel(int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *zShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, Nd4jLong *zTadShape, Nd4jLong *zOffsets) { concatKernel<T>(numArrays, data, inputShapeInfos, vz, zShapeInfo, tadPointers, offsetPointers, zTadShape, zOffsets); } /////////////////////////////////////////////////////////////////////// template<typename T> __host__ void concatKernelGeneric(dim3 &launchDims, hipStream_t *stream, int numArrays, Nd4jPointer *data, Nd4jPointer 
*inputShapeInfos, void *vz, Nd4jLong *zShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, Nd4jLong *zTadShape, Nd4jLong *zOffsets) { hipLaunchKernelGGL(( execConcatKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, numArrays, data, inputShapeInfos, vz, zShapeInfo, tadPointers, offsetPointers, zTadShape, zOffsets); sd::DebugHelper::checkErrorCode(stream, "concatGenericLegacy(...) failed"); } BUILD_SINGLE_TEMPLATE(template void ND4J_LOCAL concatKernelGeneric, (dim3 & launchDims, hipStream_t * stream, int numArrays, Nd4jPointer * data, Nd4jPointer * inputShapeInfos, void * vz, Nd4jLong *zShapeInfo, Nd4jPointer * tadPointers, Nd4jPointer * offsetPointers, Nd4jLong * zTadShape, Nd4jLong * zOffsets), LIBND4J_TYPES); }
8521131b2c307ad9d6a0bb1c074de4fe6c36f754.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace sd { /////////////////////////////////////////////////////////////////////// template<typename T> __device__ void concatKernel(int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, Nd4jLong *zTadShape, Nd4jLong *zOffsets) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int zRank = shape::rank(resultShapeInfo); auto result = reinterpret_cast<T*>(vz); auto dataT = reinterpret_cast<T **>(data); auto shapeInfoPointers = reinterpret_cast<Nd4jLong **>(inputShapeInfos); auto tadShapes = reinterpret_cast<Nd4jLong **>(tadPointers); auto tadOffsets = reinterpret_cast<Nd4jLong **>(offsetPointers); //if (threadIdx.x == 0 && blockIdx.x == 0) { // shape::printShapeInfoLinear("zTadShape", zTadShape); //} //__shared__ int tDim[1]; __shared__ int baseIdx; __shared__ int yLength; __shared__ char yOrder; __shared__ int yEWS; char zOrder = shape::order(resultShapeInfo); int zEWS = shape::elementWiseStride(resultShapeInfo); int tadEWS = 
shape::elementWiseStride(zTadShape); int zLength = shape::length(resultShapeInfo); __shared__ int arrOffset; __shared__ int numTads; if (shape::isVector(resultShapeInfo)) { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Vector here\n"); if (zEWS >= 1) { for (int r = blockIdx.x; r < numArrays; r += gridDim.x) { if(shape::isVector(shapeInfoPointers[r]) || shape::order(shapeInfoPointers[r]) == shape::order(resultShapeInfo)) { yLength = shape::length(shapeInfoPointers[r]); yEWS = shape::elementWiseStride(shapeInfoPointers[r]); // FIXME: this is bad __shared__ int baseIdx; if (threadIdx.x == 0) { baseIdx = 0; for (int f = 0; f < r; f++) { baseIdx += shape::length(shapeInfoPointers[f]); } } __syncthreads(); for (int i = threadIdx.x; i < yLength && baseIdx + i < zLength; i += blockDim.x) { result[baseIdx + i * zEWS] = dataT[r][i * yEWS]; } __syncthreads(); } else { if (tid == 0) printf("Non-matched order for vector\n"); } } } else { if (tid == 0) printf("Vector Non-1 zEWS\n"); } return; } bool _vec = shape::isVector(resultShapeInfo); // TODO: to be pulled into separate kernel. 
matrix concatenation for (int r = 0; r < numArrays; r ++) { auto currentShape = shapeInfoPointers[r]; auto currentData = dataT[r]; auto currentTad = tadShapes[r]; auto currentOffsets = tadOffsets[r]; if (threadIdx.x == 0) { yLength = shape::length(currentTad); yOrder = shape::order(currentTad); yEWS = shape::elementWiseStride(currentTad); numTads = shape::length(currentShape) / yLength; arrOffset = 0; for (int f = 0; f < r; f++) { arrOffset += shape::length(tadShapes[f]); } //if (threadIdx.x == 0 && blockIdx.x == 0) { // shape::printShapeInfoLinear("currentTad", currentTad); //} } __syncthreads(); if (yLength == 1 && _vec) { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Branch 0\n"); // edge case, each thread will handle it's own tad then for (int j = tid; j < numTads; j += blockDim.x * gridDim.x) { Nd4jLong inputOffset = currentOffsets[j]; Nd4jLong resultOffset = zOffsets[j]; T *dataTAD = currentData + inputOffset; T *resultTAD = result + resultOffset; int sub[MAX_RANK]; shape::index2coords(arrOffset, zTadShape, sub); Nd4jLong baseOffset = shape::getOffset(zTadShape, sub); resultTAD += baseOffset; auto yRank = shape::rank(currentTad); auto tadRank = shape::rank(zTadShape); shape::index2coords(0, currentTad, sub); auto yOffset = shape::getOffset(currentTad, sub); resultOffset = shape::getOffset(zTadShape, sub); resultTAD[resultOffset] = dataTAD[yOffset]; } } else { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Branch 1\n"); for (int j = blockIdx.x; j < numTads; j += gridDim.x) { auto inputOffset = currentOffsets[j]; auto resultOffset = zOffsets[j]; auto dataTAD = currentData + inputOffset; auto resultTAD = result + resultOffset; int sub[MAX_RANK]; shape::index2coords(arrOffset, zTadShape, sub); Nd4jLong baseOffset = shape::getOffset(zTadShape, sub); resultTAD += baseOffset; if (zOrder == yOrder && yEWS > 0 && tadEWS > 0) { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Branch A\n"); for (int i = threadIdx.x; i < yLength; i += blockDim.x) { 
resultTAD[i * tadEWS] = dataTAD[i * yEWS]; } } else { if(tadEWS > 0 && shape::order(resultShapeInfo) == shape::order(currentTad)) { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Branch B\n"); if (threadIdx.x == 0) { baseIdx = 0; for (int f = 0; f < r; f++) { baseIdx += shape::length(shapeInfoPointers[f]); } //printf("R: %i; baseIdx: %i;\n", baseIdx); } __syncthreads(); if (numTads == 1) { for(int k = threadIdx.x; k < yLength; k+= blockDim.x) { resultTAD[baseIdx + k * tadEWS] = dataTAD[k]; } } else { int yIdx[MAX_RANK]; auto yRank = shape::rank(currentTad); for (int i = threadIdx.x; i < yLength; i+= blockDim.x) { shape::index2coords(i, currentTad, yIdx); auto yOffset = shape::getOffset(currentTad, yIdx); resultTAD[baseIdx + i * tadEWS] = dataTAD[yOffset]; } } __syncthreads(); } else { //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Branch C; yLength: %i;\n", yLength); int zIdx[MAX_RANK]; int yIdx[MAX_RANK]; auto yRank = shape::rank(currentTad); auto tadRank = shape::rank(zTadShape); for (int i = threadIdx.x; i < yLength; i+= blockDim.x) { shape::index2coords(i, currentTad, yIdx); shape::index2coords(i, zTadShape, zIdx); auto yOffset = shape::getOffset(currentTad, yIdx); auto resultOffset = shape::getOffset(zTadShape, zIdx); resultTAD[resultOffset] = dataTAD[yOffset]; } } } __syncthreads(); } } __syncthreads(); } } /////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execConcatKernel(int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *zShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, Nd4jLong *zTadShape, Nd4jLong *zOffsets) { concatKernel<T>(numArrays, data, inputShapeInfos, vz, zShapeInfo, tadPointers, offsetPointers, zTadShape, zOffsets); } /////////////////////////////////////////////////////////////////////// template<typename T> __host__ void concatKernelGeneric(dim3 &launchDims, cudaStream_t *stream, int numArrays, Nd4jPointer *data, 
Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *zShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, Nd4jLong *zTadShape, Nd4jLong *zOffsets) { execConcatKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(numArrays, data, inputShapeInfos, vz, zShapeInfo, tadPointers, offsetPointers, zTadShape, zOffsets); sd::DebugHelper::checkErrorCode(stream, "concatGenericLegacy(...) failed"); } BUILD_SINGLE_TEMPLATE(template void ND4J_LOCAL concatKernelGeneric, (dim3 & launchDims, cudaStream_t * stream, int numArrays, Nd4jPointer * data, Nd4jPointer * inputShapeInfos, void * vz, Nd4jLong *zShapeInfo, Nd4jPointer * tadPointers, Nd4jPointer * offsetPointers, Nd4jLong * zTadShape, Nd4jLong * zOffsets), LIBND4J_TYPES); }
9bc0ac84776e6fb86c4230190294212717dce9a7.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdio.h> #include <hip/hip_runtime.h> #include <chrono> __global__ void warmingup(float *c) { int tid = blockIdx.x*blockDim.x + threadIdx.x; float a, b; a=b=0.0f; if (tid%2==0){ a=100.0f; } else { b=200.0f; } c[tid] = a+b; } __global__ void mathKernel1(float *c) { int tid = blockIdx.x*blockDim.x + threadIdx.x; float a, b; a=b=0.0f; if (tid%2==0){ a=100.0f; } else { b=200.0f; } c[tid] = a+b; } __global__ void mathKernel2(float *c) { int tid = blockIdx.x*blockDim.x + threadIdx.x; float a, b; a=b=0.0f; if (tid%2==0){ a=100.0f; } else { b=200.0f; } c[tid] = a+b; } __global__ void mathKernel3(float *c) { int tid = blockIdx.x*blockDim.x + threadIdx.x; float a, b; a=b=0.0f; bool ipred = (tid&2==0); if (ipred){ a=100.0f; } else { b=200.0f; } c[tid] = a+b; } int main(int argc, char **argv) { // setup device int dev=0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); std::cout << argv[0] << " using device " << dev << ": " << deviceProp.name << std::endl; // set up data size int size = 64; int blocksize = 64; if(argc>1) blocksize = atoi(argv[1]); if(argc>2) blocksize = atoi(argv[2]); std::cout << "Data size " << size << std::endl; // set up execution configuration dim3 block (blocksize, 1); dim3 grid ((size+block.x-1)/block.x, 1); std::cout << "Execution configure (block " << block.x << " grid " << grid.x << ")" << std::endl; // allocate gpu memory float *d_C; size_t nBytes = size*sizeof(float); hipMalloc((float**)&d_C, nBytes); // run a warmup kernel to remove overhead hipDeviceSynchronize(); auto iStart = std::chrono::system_clock::now(); hipLaunchKernelGGL(( warmingup), dim3(grid), dim3(block), 0, 0, d_C); hipDeviceSynchronize(); auto iElaps = std::chrono::system_clock::now() - iStart; auto nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count(); std::cout << "warmup <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl; // 
run kernel 1 iStart = std::chrono::system_clock::now(); hipLaunchKernelGGL(( mathKernel1), dim3(grid), dim3(block), 0, 0, d_C); hipDeviceSynchronize(); iElaps = std::chrono::system_clock::now() - iStart; nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count(); std::cout << "kernel1 <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl; // run kernel 2 iStart = std::chrono::system_clock::now(); hipLaunchKernelGGL(( mathKernel2), dim3(grid), dim3(block), 0, 0, d_C); hipDeviceSynchronize(); iElaps = std::chrono::system_clock::now() - iStart; nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count(); std::cout << "kernel2 <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl; // run kernel 3 iStart = std::chrono::system_clock::now(); hipLaunchKernelGGL(( mathKernel3), dim3(grid), dim3(block), 0, 0, d_C); hipDeviceSynchronize(); iElaps = std::chrono::system_clock::now() - iStart; nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count(); std::cout << "kernel3 <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl; // free gpu memory and reset device hipFree(d_C); hipDeviceReset(); return EXIT_SUCCESS; }
9bc0ac84776e6fb86c4230190294212717dce9a7.cu
#include <iostream> #include <stdio.h> #include <cuda_runtime.h> #include <chrono> __global__ void warmingup(float *c) { int tid = blockIdx.x*blockDim.x + threadIdx.x; float a, b; a=b=0.0f; if (tid%2==0){ a=100.0f; } else { b=200.0f; } c[tid] = a+b; } __global__ void mathKernel1(float *c) { int tid = blockIdx.x*blockDim.x + threadIdx.x; float a, b; a=b=0.0f; if (tid%2==0){ a=100.0f; } else { b=200.0f; } c[tid] = a+b; } __global__ void mathKernel2(float *c) { int tid = blockIdx.x*blockDim.x + threadIdx.x; float a, b; a=b=0.0f; if (tid%2==0){ a=100.0f; } else { b=200.0f; } c[tid] = a+b; } __global__ void mathKernel3(float *c) { int tid = blockIdx.x*blockDim.x + threadIdx.x; float a, b; a=b=0.0f; bool ipred = (tid&2==0); if (ipred){ a=100.0f; } else { b=200.0f; } c[tid] = a+b; } int main(int argc, char **argv) { // setup device int dev=0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); std::cout << argv[0] << " using device " << dev << ": " << deviceProp.name << std::endl; // set up data size int size = 64; int blocksize = 64; if(argc>1) blocksize = atoi(argv[1]); if(argc>2) blocksize = atoi(argv[2]); std::cout << "Data size " << size << std::endl; // set up execution configuration dim3 block (blocksize, 1); dim3 grid ((size+block.x-1)/block.x, 1); std::cout << "Execution configure (block " << block.x << " grid " << grid.x << ")" << std::endl; // allocate gpu memory float *d_C; size_t nBytes = size*sizeof(float); cudaMalloc((float**)&d_C, nBytes); // run a warmup kernel to remove overhead cudaDeviceSynchronize(); auto iStart = std::chrono::system_clock::now(); warmingup<<<grid, block>>> (d_C); cudaDeviceSynchronize(); auto iElaps = std::chrono::system_clock::now() - iStart; auto nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count(); std::cout << "warmup <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl; // run kernel 1 iStart = std::chrono::system_clock::now(); mathKernel1<<<grid, block>>> (d_C); 
cudaDeviceSynchronize(); iElaps = std::chrono::system_clock::now() - iStart; nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count(); std::cout << "kernel1 <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl; // run kernel 2 iStart = std::chrono::system_clock::now(); mathKernel2<<<grid, block>>> (d_C); cudaDeviceSynchronize(); iElaps = std::chrono::system_clock::now() - iStart; nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count(); std::cout << "kernel2 <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl; // run kernel 3 iStart = std::chrono::system_clock::now(); mathKernel3<<<grid, block>>> (d_C); cudaDeviceSynchronize(); iElaps = std::chrono::system_clock::now() - iStart; nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count(); std::cout << "kernel3 <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl; // free gpu memory and reset device cudaFree(d_C); cudaDeviceReset(); return EXIT_SUCCESS; }
bf61082b6da800d78c82bc2a003b693a1f2d36d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * What was improved? * - Removed malloc and changed them to cudaAllocHost * - Added hipHostMallocWriteCombined flag */ using namespace std; #include <cassert> #include <array> #include <fstream> #include <sstream> #include "stdio.h" #include "jbutil.h" //function to save output void saveOutput(float *ii, int rows, int cols, string filename, double t){ ofstream outputFile; filename = filename.substr(filename.find_last_of("/") + 1); filename = filename.substr(0, filename.size()-4); filename = filename+".txt"; string filename_to_save = "outputs/output_"+filename; outputFile.open(filename_to_save); for(size_t row = 0; row < rows; row++){ for(size_t col = 0; col < cols; col++){ outputFile << ii[row * cols +col] << " "; } outputFile << endl; } outputFile << "Time taken: " << t << "s" << endl; cout << "Result written to file" << endl; outputFile.close(); } //function to calculate row cumulative sums __global__ void cumulativeRowPass(int rows, int cols, float *ii) { //get row int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < rows){ //get row index int row_index = i * cols; //for each column for(int j=0; j < cols; j++){ //get index from array int index = row_index + j; //get previous value int prev_val = (j==0) ? 0 : ii[index-1]; ii[index] = prev_val + ii[index]; } } } //function to calculate column cumulative sums __global__ void cumulativeColumnPass(int rows, int cols, float *ii) { //get column index int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < cols){ //for each row in column for(int j=0; j < rows; j++){ //get index from array int index = j * cols + i; //get previous value float prev_val = (j==0) ? 
0 : ii[index - cols]; ii[index] = prev_val + ii[index]; } } } int main(int argc, char *argv[]) { //check that file was passed if(argc < 2 ){ printf("Please pass in a filename\n"); return 1; } //get filename string filename = argv[1]; //get extension string ext = filename.substr(filename.size()-4, filename.size()); if(ext != ".pgm"){ cout << "Input must be a .pgm file" << endl; return 1; } //check cli argument to see whether to save output to file bool save = true; if(argc == 3){ save = argv[2] == "true" || argv[2] == "t"; } //read file jbutil::image<int> image_in; std::ifstream file_in(filename.c_str()); image_in.load(file_in); //get rows and cols int rows = image_in.get_rows(); int cols = image_in.get_cols(); const int size = rows * cols * sizeof(float); //initialise arrays float *ii; //allocate memory to host with write combined flag hipHostMalloc((void**)&ii, size, hipHostMallocWriteCombined); //fill array from image for(int row=0; row < rows; row++){ for (int col=0; col < cols; col++){ ii[row * cols + col] = image_in(0, row, col); } } float* dii; hipMalloc((void**)&dii, size); double totalTime = 0; // start timer double t = jbutil::gettime(); // Copy over input from host to device hipMemcpy(dii, ii, size, hipMemcpyHostToDevice); // stop timer t = jbutil::gettime() - t; printf("Time taken to copy from host to device: %fs\n", t); totalTime += t; int threadsInBlocks = 128; const int nblocks = (rows + (threadsInBlocks-1)) / threadsInBlocks; printf("Number of threads in blocks: %d\n", threadsInBlocks); printf("Number of blocks: %d\n", nblocks); // start timer t = jbutil::gettime(); //start kernels hipLaunchKernelGGL(( cumulativeRowPass), dim3(nblocks), dim3(threadsInBlocks), 0, 0, rows, cols, dii); hipLaunchKernelGGL(( cumulativeColumnPass), dim3(nblocks), dim3(threadsInBlocks), 0, 0, rows, cols, dii); // stop timer t = jbutil::gettime() - t; printf("Time taken to calculate integral image: %fs\n", t); totalTime += t; t = jbutil::gettime(); // Copy over output from 
device to host hipMemcpy(ii, dii, size, hipMemcpyDeviceToHost); // stop timer t = jbutil::gettime() - t; printf("Time taken to copy from device to host: %fs\n", t); totalTime += t; //output to file if save is true if(save){ saveOutput(ii, rows, cols, filename, totalTime); } printf("Total time taken: %fs\n", totalTime); hipHostFree(ii); //free device memory hipFree(dii); }
bf61082b6da800d78c82bc2a003b693a1f2d36d9.cu
/** * What was improved? * - Removed malloc and changed them to cudaAllocHost * - Added cudaHostAllocWriteCombined flag */ using namespace std; #include <cassert> #include <array> #include <fstream> #include <sstream> #include "stdio.h" #include "jbutil.h" //function to save output void saveOutput(float *ii, int rows, int cols, string filename, double t){ ofstream outputFile; filename = filename.substr(filename.find_last_of("/") + 1); filename = filename.substr(0, filename.size()-4); filename = filename+".txt"; string filename_to_save = "outputs/output_"+filename; outputFile.open(filename_to_save); for(size_t row = 0; row < rows; row++){ for(size_t col = 0; col < cols; col++){ outputFile << ii[row * cols +col] << " "; } outputFile << endl; } outputFile << "Time taken: " << t << "s" << endl; cout << "Result written to file" << endl; outputFile.close(); } //function to calculate row cumulative sums __global__ void cumulativeRowPass(int rows, int cols, float *ii) { //get row int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < rows){ //get row index int row_index = i * cols; //for each column for(int j=0; j < cols; j++){ //get index from array int index = row_index + j; //get previous value int prev_val = (j==0) ? 0 : ii[index-1]; ii[index] = prev_val + ii[index]; } } } //function to calculate column cumulative sums __global__ void cumulativeColumnPass(int rows, int cols, float *ii) { //get column index int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < cols){ //for each row in column for(int j=0; j < rows; j++){ //get index from array int index = j * cols + i; //get previous value float prev_val = (j==0) ? 
0 : ii[index - cols]; ii[index] = prev_val + ii[index]; } } } int main(int argc, char *argv[]) { //check that file was passed if(argc < 2 ){ printf("Please pass in a filename\n"); return 1; } //get filename string filename = argv[1]; //get extension string ext = filename.substr(filename.size()-4, filename.size()); if(ext != ".pgm"){ cout << "Input must be a .pgm file" << endl; return 1; } //check cli argument to see whether to save output to file bool save = true; if(argc == 3){ save = argv[2] == "true" || argv[2] == "t"; } //read file jbutil::image<int> image_in; std::ifstream file_in(filename.c_str()); image_in.load(file_in); //get rows and cols int rows = image_in.get_rows(); int cols = image_in.get_cols(); const int size = rows * cols * sizeof(float); //initialise arrays float *ii; //allocate memory to host with write combined flag cudaHostAlloc((void**)&ii, size, cudaHostAllocWriteCombined); //fill array from image for(int row=0; row < rows; row++){ for (int col=0; col < cols; col++){ ii[row * cols + col] = image_in(0, row, col); } } float* dii; cudaMalloc((void**)&dii, size); double totalTime = 0; // start timer double t = jbutil::gettime(); // Copy over input from host to device cudaMemcpy(dii, ii, size, cudaMemcpyHostToDevice); // stop timer t = jbutil::gettime() - t; printf("Time taken to copy from host to device: %fs\n", t); totalTime += t; int threadsInBlocks = 128; const int nblocks = (rows + (threadsInBlocks-1)) / threadsInBlocks; printf("Number of threads in blocks: %d\n", threadsInBlocks); printf("Number of blocks: %d\n", nblocks); // start timer t = jbutil::gettime(); //start kernels cumulativeRowPass<<<nblocks, threadsInBlocks>>>(rows, cols, dii); cumulativeColumnPass<<<nblocks, threadsInBlocks>>>(rows, cols, dii); // stop timer t = jbutil::gettime() - t; printf("Time taken to calculate integral image: %fs\n", t); totalTime += t; t = jbutil::gettime(); // Copy over output from device to host cudaMemcpy(ii, dii, size, cudaMemcpyDeviceToHost); // 
stop timer t = jbutil::gettime() - t; printf("Time taken to copy from device to host: %fs\n", t); totalTime += t; //output to file if save is true if(save){ saveOutput(ii, rows, cols, filename, totalTime); } printf("Total time taken: %fs\n", totalTime); cudaFreeHost(ii); //free device memory cudaFree(dii); }
486f751dfc04917c9916ec70b6c210181cf22e39.hip
// !!! This is a file automatically generated by hipify!!! /** * @file rasterize.cu * @brief CUDA-accelerated rasterization pipeline. * @authors Skeleton code: Yining Karl Li, Kai Ninomiya * @date 2012-2015 * @copyright University of Pennsylvania & STUDENT */ #include "rasterize.h" #include <cmath> #include <cstdio> #include <hip/hip_runtime.h> #include <thrust/random.h> #include <util/checkCUDAError.h> #include <glm/glm.hpp> #include <glm/gtc/matrix_transform.hpp> struct VertexIn { glm::vec3 pos; glm::vec3 nor; glm::vec3 col; glm::vec2 uv; // TODO (optional) add other vertex attributes (e.g. texture coordinates) }; struct VertexOut { // TODO glm::vec3 pos; glm::vec3 nor; glm::vec3 col; glm::vec2 uv; }; struct Triangle { VertexOut v[3]; }; struct Fragment { glm::vec3 color; glm::vec3 position; glm::vec3 normal; int depth; }; __host__ __device__ inline unsigned int utilhash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } static int width = 0; static int height = 0; static int *dev_bufIdx = NULL; static VertexIn *dev_bufVertex = NULL; static VertexOut *dev_bufVertex_out = NULL; static Triangle *dev_primitives = NULL; static Fragment *dev_depthbuffer = NULL; static glm::vec3 *dev_framebuffer = NULL; static int bufIdxSize = 0; static int vertCount = 0; /** * Kernel that writes the image to the OpenGL PBO directly. 
*/ __global__ void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { glm::vec3 color; color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0; color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0; color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0; // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } // Writes fragment colors to the framebuffer __global__ void render(int w, int h, Fragment *depthbuffer, glm::vec3 *framebuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { framebuffer[index] = depthbuffer[index].color; } } __global__ void depthBufferClearing(int w, int h, Fragment *fragments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if ( x < w && y < h) { fragments[index].depth = INT_MAX; fragments[index].color = glm::vec3(0.0f); } } __global__ void vertexShading(int n, glm::mat4 view_projection, VertexIn *vs_input, VertexOut *vs_output) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < n) { VertexOut vert_out; glm::vec4 input_pos = glm::vec4( vs_input[index].pos.x, vs_input[index].pos.y, vs_input[index].pos.z, 1.0f); glm::vec3 transformedPoint = multiplyMV(view_projection, input_pos); vert_out.pos = transformedPoint; glm::vec4 input_normal = glm::vec4( vs_input[index].nor.x, vs_input[index].nor.y, vs_input[index].nor.z, 1.0f); glm::vec3 output_normal = multiplyMV(view_projection,input_normal); vert_out.nor = output_normal; vert_out.col = vs_input[index].col; vs_output[index] = vert_out; } } __global__ void primitiveAssembling(int n, VertexOut *vs_output, Triangle *primitives) { int index = 
(blockIdx.x * blockDim.x) + threadIdx.x; if (index < n) { primitives[index].v[0] = vs_output[3*index]; primitives[index].v[1] = vs_output[3*index+1]; primitives[index].v[2] = vs_output[3*index+2]; } } __global__ void rasterizing(int n, int w, int h, Triangle *primitives, Fragment *fs_input) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < n) { Triangle tri = primitives[index]; glm::vec3 tri_verts[3] = {tri.v[0].pos, tri.v[1].pos, tri.v[2].pos}; AABB aabb = getAABBForTriangle(tri_verts); glm::vec2 pixel_min; pixel_min.x = (aabb.min.x + 1) * w / 2.0f; pixel_min.y = (aabb.min.y + 1) * h / 2.0f; glm::vec2 pixel_max; pixel_max.x = (aabb.max.x + 1) * w / 2.0f; pixel_max.y = (aabb.max.y + 1) * h / 2.0f; for (int i = glm::max(0.0f, pixel_min.x); i <= pixel_max.x; i++) { for (int j = glm::max(0.0f, pixel_min.y); j <= pixel_max.y; j++) { //thrust::default_random_engine rng = makeSeededRandomEngine(0, index, 0); //thrust::uniform_real_distribution<float> u01(0, 1); float x = (i/float(w)) * 2.0f - 1; float y = (j/float(h)) * 2.0f - 1; glm::vec3 barycentric = calculateBarycentricCoordinate(tri_verts, glm::vec2(x,y)); if (isBarycentricCoordInBounds(barycentric)) { int frag_index = j*w + i; int depth = getZAtCoordinate(barycentric, tri_verts) * INT_MAX; atomicMin(&fs_input[frag_index].depth, depth); if(fs_input[frag_index].depth == depth) { Fragment frag; frag.color = (primitives[index].v[0].col + primitives[index].v[1].col + primitives[index].v[2].col) / 3.0f; frag.normal = (primitives[index].v[0].nor + primitives[index].v[1].nor + primitives[index].v[2].nor) / 3.0f; frag.position = barycentric; frag.depth = depth; fs_input[frag_index] = frag; } } } } } } __global__ void fragmentShading(int w, int h, Fragment *fs, glm::vec3 light_pos) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if ( x < w && y < h) { float diffuseTerm = 0.7f; glm::vec3 light_color = glm::vec3(1.0f); fs[index].color 
*= diffuseTerm * glm::max(0.0f, glm::dot(glm::normalize(fs[index].normal), glm::normalize(light_pos - fs[index].position))); } } /** * Called once at the beginning of the program to allocate memory. */ void rasterizeInit(int w, int h) { width = w; height = h; hipFree(dev_depthbuffer); hipMalloc(&dev_depthbuffer, width * height * sizeof(Fragment)); hipMemset(dev_depthbuffer, 0, width * height * sizeof(Fragment)); hipFree(dev_framebuffer); hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3)); hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3)); checkCUDAError("rasterizeInit"); } /** * Set all of the buffers necessary for rasterization. */ void rasterizeSetBuffers( int _bufIdxSize, int *bufIdx, int _vertCount, float *bufPos, float *bufNor, float *bufCol) { bufIdxSize = _bufIdxSize; vertCount = _vertCount; hipFree(dev_bufIdx); hipMalloc(&dev_bufIdx, bufIdxSize * sizeof(int)); hipMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), hipMemcpyHostToDevice); VertexIn *bufVertex = new VertexIn[_vertCount]; for (int i = 0; i < vertCount; i++) { int j = i * 3; bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]); bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]); bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]); } hipFree(dev_bufVertex); hipMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn)); hipMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), hipMemcpyHostToDevice); hipFree(dev_bufVertex_out); hipMalloc(&dev_bufVertex_out, vertCount * sizeof(VertexOut)); hipFree(dev_primitives); hipMalloc(&dev_primitives, vertCount / 3 * sizeof(Triangle)); hipMemset(dev_primitives, 0, vertCount / 3 * sizeof(Triangle)); checkCUDAError("rasterizeSetBuffers"); } /** * Perform rasterization. 
*/ void rasterize(uchar4 *pbo) { int sideLength2d = 8; dim3 blockSize2d(sideLength2d, sideLength2d); dim3 blockCount2d((width - 1) / blockSize2d.x + 1, (height - 1) / blockSize2d.y + 1); dim3 blockSize1d(128); dim3 blockCount1d((vertCount + 128 - 1) / 128); //-----RATERIZATION PIPELINE---------- //---Clear Depth Buffer hipLaunchKernelGGL(( depthBufferClearing), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, dev_depthbuffer); checkCUDAError("depth buffer clearing"); //---Vertex Shader //view matrix glm::mat4 view = glm::lookAt( glm::vec3(0.0f, 1.5f, 5.0f), glm::vec3(0.0f, 0.0f, -1.0f), glm::vec3(0.0f, -1.0f, 0.0f)); //projection matrix glm::mat4 projection = glm::perspective( 20.0f, float(width)/float(height), 1.0f, 100.0f); glm::mat4 view_projection = projection * view; hipLaunchKernelGGL(( vertexShading), dim3(blockCount1d), dim3(blockSize1d), 0, 0, vertCount, view_projection, dev_bufVertex, dev_bufVertex_out); checkCUDAError("vertex shader"); //---Primitive Assembly hipLaunchKernelGGL(( primitiveAssembling), dim3(blockCount1d), dim3(blockSize1d), 0, 0, vertCount/3, dev_bufVertex_out, dev_primitives); checkCUDAError("primitive assembling"); //---Rasterization hipLaunchKernelGGL(( rasterizing), dim3(blockCount1d), dim3(blockSize1d), 0, 0, vertCount/3, width, height, dev_primitives, dev_depthbuffer); checkCUDAError("triangle rasterizing"); //--Fragment Shader glm::vec3 light_pos = glm::vec3(-3.0f, 5.0f, 10.0f); hipLaunchKernelGGL(( fragmentShading), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, dev_depthbuffer, light_pos); checkCUDAError("fragment shading"); // Copy depthbuffer colors into framebuffer hipLaunchKernelGGL(( render), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, dev_depthbuffer, dev_framebuffer); // Copy framebuffer into OpenGL buffer for OpenGL previewing hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer); checkCUDAError("rasterize"); } /** * 
Called once at the end of the program to free CUDA memory. */ void rasterizeFree() { hipFree(dev_bufIdx); dev_bufIdx = NULL; hipFree(dev_bufVertex); dev_bufVertex = NULL; hipFree(dev_bufVertex_out); dev_bufVertex_out = NULL; hipFree(dev_primitives); dev_primitives = NULL; hipFree(dev_depthbuffer); dev_depthbuffer = NULL; hipFree(dev_framebuffer); dev_framebuffer = NULL; checkCUDAError("rasterizeFree"); }
486f751dfc04917c9916ec70b6c210181cf22e39.cu
/** * @file rasterize.cu * @brief CUDA-accelerated rasterization pipeline. * @authors Skeleton code: Yining Karl Li, Kai Ninomiya * @date 2012-2015 * @copyright University of Pennsylvania & STUDENT */ #include "rasterize.h" #include <cmath> #include <cstdio> #include <cuda.h> #include <thrust/random.h> #include <util/checkCUDAError.h> #include <glm/glm.hpp> #include <glm/gtc/matrix_transform.hpp> struct VertexIn { glm::vec3 pos; glm::vec3 nor; glm::vec3 col; glm::vec2 uv; // TODO (optional) add other vertex attributes (e.g. texture coordinates) }; struct VertexOut { // TODO glm::vec3 pos; glm::vec3 nor; glm::vec3 col; glm::vec2 uv; }; struct Triangle { VertexOut v[3]; }; struct Fragment { glm::vec3 color; glm::vec3 position; glm::vec3 normal; int depth; }; __host__ __device__ inline unsigned int utilhash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } static int width = 0; static int height = 0; static int *dev_bufIdx = NULL; static VertexIn *dev_bufVertex = NULL; static VertexOut *dev_bufVertex_out = NULL; static Triangle *dev_primitives = NULL; static Fragment *dev_depthbuffer = NULL; static glm::vec3 *dev_framebuffer = NULL; static int bufIdxSize = 0; static int vertCount = 0; /** * Kernel that writes the image to the OpenGL PBO directly. 
*/ __global__ void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { glm::vec3 color; color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0; color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0; color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0; // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } // Writes fragment colors to the framebuffer __global__ void render(int w, int h, Fragment *depthbuffer, glm::vec3 *framebuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { framebuffer[index] = depthbuffer[index].color; } } __global__ void depthBufferClearing(int w, int h, Fragment *fragments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if ( x < w && y < h) { fragments[index].depth = INT_MAX; fragments[index].color = glm::vec3(0.0f); } } __global__ void vertexShading(int n, glm::mat4 view_projection, VertexIn *vs_input, VertexOut *vs_output) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < n) { VertexOut vert_out; glm::vec4 input_pos = glm::vec4( vs_input[index].pos.x, vs_input[index].pos.y, vs_input[index].pos.z, 1.0f); glm::vec3 transformedPoint = multiplyMV(view_projection, input_pos); vert_out.pos = transformedPoint; glm::vec4 input_normal = glm::vec4( vs_input[index].nor.x, vs_input[index].nor.y, vs_input[index].nor.z, 1.0f); glm::vec3 output_normal = multiplyMV(view_projection,input_normal); vert_out.nor = output_normal; vert_out.col = vs_input[index].col; vs_output[index] = vert_out; } } __global__ void primitiveAssembling(int n, VertexOut *vs_output, Triangle *primitives) { int index = 
(blockIdx.x * blockDim.x) + threadIdx.x; if (index < n) { primitives[index].v[0] = vs_output[3*index]; primitives[index].v[1] = vs_output[3*index+1]; primitives[index].v[2] = vs_output[3*index+2]; } } __global__ void rasterizing(int n, int w, int h, Triangle *primitives, Fragment *fs_input) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < n) { Triangle tri = primitives[index]; glm::vec3 tri_verts[3] = {tri.v[0].pos, tri.v[1].pos, tri.v[2].pos}; AABB aabb = getAABBForTriangle(tri_verts); glm::vec2 pixel_min; pixel_min.x = (aabb.min.x + 1) * w / 2.0f; pixel_min.y = (aabb.min.y + 1) * h / 2.0f; glm::vec2 pixel_max; pixel_max.x = (aabb.max.x + 1) * w / 2.0f; pixel_max.y = (aabb.max.y + 1) * h / 2.0f; for (int i = glm::max(0.0f, pixel_min.x); i <= pixel_max.x; i++) { for (int j = glm::max(0.0f, pixel_min.y); j <= pixel_max.y; j++) { //thrust::default_random_engine rng = makeSeededRandomEngine(0, index, 0); //thrust::uniform_real_distribution<float> u01(0, 1); float x = (i/float(w)) * 2.0f - 1; float y = (j/float(h)) * 2.0f - 1; glm::vec3 barycentric = calculateBarycentricCoordinate(tri_verts, glm::vec2(x,y)); if (isBarycentricCoordInBounds(barycentric)) { int frag_index = j*w + i; int depth = getZAtCoordinate(barycentric, tri_verts) * INT_MAX; atomicMin(&fs_input[frag_index].depth, depth); if(fs_input[frag_index].depth == depth) { Fragment frag; frag.color = (primitives[index].v[0].col + primitives[index].v[1].col + primitives[index].v[2].col) / 3.0f; frag.normal = (primitives[index].v[0].nor + primitives[index].v[1].nor + primitives[index].v[2].nor) / 3.0f; frag.position = barycentric; frag.depth = depth; fs_input[frag_index] = frag; } } } } } } __global__ void fragmentShading(int w, int h, Fragment *fs, glm::vec3 light_pos) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if ( x < w && y < h) { float diffuseTerm = 0.7f; glm::vec3 light_color = glm::vec3(1.0f); fs[index].color 
*= diffuseTerm * glm::max(0.0f, glm::dot(glm::normalize(fs[index].normal), glm::normalize(light_pos - fs[index].position))); } } /** * Called once at the beginning of the program to allocate memory. */ void rasterizeInit(int w, int h) { width = w; height = h; cudaFree(dev_depthbuffer); cudaMalloc(&dev_depthbuffer, width * height * sizeof(Fragment)); cudaMemset(dev_depthbuffer, 0, width * height * sizeof(Fragment)); cudaFree(dev_framebuffer); cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3)); cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3)); checkCUDAError("rasterizeInit"); } /** * Set all of the buffers necessary for rasterization. */ void rasterizeSetBuffers( int _bufIdxSize, int *bufIdx, int _vertCount, float *bufPos, float *bufNor, float *bufCol) { bufIdxSize = _bufIdxSize; vertCount = _vertCount; cudaFree(dev_bufIdx); cudaMalloc(&dev_bufIdx, bufIdxSize * sizeof(int)); cudaMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), cudaMemcpyHostToDevice); VertexIn *bufVertex = new VertexIn[_vertCount]; for (int i = 0; i < vertCount; i++) { int j = i * 3; bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]); bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]); bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]); } cudaFree(dev_bufVertex); cudaMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn)); cudaMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), cudaMemcpyHostToDevice); cudaFree(dev_bufVertex_out); cudaMalloc(&dev_bufVertex_out, vertCount * sizeof(VertexOut)); cudaFree(dev_primitives); cudaMalloc(&dev_primitives, vertCount / 3 * sizeof(Triangle)); cudaMemset(dev_primitives, 0, vertCount / 3 * sizeof(Triangle)); checkCUDAError("rasterizeSetBuffers"); } /** * Perform rasterization. 
*/ void rasterize(uchar4 *pbo) { int sideLength2d = 8; dim3 blockSize2d(sideLength2d, sideLength2d); dim3 blockCount2d((width - 1) / blockSize2d.x + 1, (height - 1) / blockSize2d.y + 1); dim3 blockSize1d(128); dim3 blockCount1d((vertCount + 128 - 1) / 128); //-----RATERIZATION PIPELINE---------- //---Clear Depth Buffer depthBufferClearing<<<blockCount2d, blockSize2d>>>(width, height, dev_depthbuffer); checkCUDAError("depth buffer clearing"); //---Vertex Shader //view matrix glm::mat4 view = glm::lookAt( glm::vec3(0.0f, 1.5f, 5.0f), glm::vec3(0.0f, 0.0f, -1.0f), glm::vec3(0.0f, -1.0f, 0.0f)); //projection matrix glm::mat4 projection = glm::perspective( 20.0f, float(width)/float(height), 1.0f, 100.0f); glm::mat4 view_projection = projection * view; vertexShading<<<blockCount1d, blockSize1d>>>(vertCount, view_projection, dev_bufVertex, dev_bufVertex_out); checkCUDAError("vertex shader"); //---Primitive Assembly primitiveAssembling<<<blockCount1d, blockSize1d>>>(vertCount/3, dev_bufVertex_out, dev_primitives); checkCUDAError("primitive assembling"); //---Rasterization rasterizing<<<blockCount1d, blockSize1d>>>(vertCount/3, width, height, dev_primitives, dev_depthbuffer); checkCUDAError("triangle rasterizing"); //--Fragment Shader glm::vec3 light_pos = glm::vec3(-3.0f, 5.0f, 10.0f); fragmentShading<<<blockCount2d, blockSize2d>>>(width, height, dev_depthbuffer, light_pos); checkCUDAError("fragment shading"); // Copy depthbuffer colors into framebuffer render<<<blockCount2d, blockSize2d>>>(width, height, dev_depthbuffer, dev_framebuffer); // Copy framebuffer into OpenGL buffer for OpenGL previewing sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer); checkCUDAError("rasterize"); } /** * Called once at the end of the program to free CUDA memory. 
*/ void rasterizeFree() { cudaFree(dev_bufIdx); dev_bufIdx = NULL; cudaFree(dev_bufVertex); dev_bufVertex = NULL; cudaFree(dev_bufVertex_out); dev_bufVertex_out = NULL; cudaFree(dev_primitives); dev_primitives = NULL; cudaFree(dev_depthbuffer); dev_depthbuffer = NULL; cudaFree(dev_framebuffer); dev_framebuffer = NULL; checkCUDAError("rasterizeFree"); }
6ef62d45bcbcf884e209a452d32fbfd073b357f3.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <assert.h> #include <hip/hip_runtime.h> #include <math.h> #include <bits/stdc++.h> using namespace std; #define isValid(X, Y) (X >= 0 && Y>=0 && X < M && Y < N) __global__ void image_bluring(float* a, float* b, int M, int N) { //__shared__ float[16][16][3]; int global_x = blockDim.x * blockIdx.x + threadIdx.x ; int global_y = blockDim.y * blockIdx.y + threadIdx.y ; float channel1 = 0, channel2 = 0, channel3 = 0; int count = 0; for(int i = global_x - 1; i <= global_x + 1; i++) { for(int j = global_y - 1; j <= global_y + 1; j++){ if(isValid(j,i)) { //printf("%f\n",a[(j*N+i)*3]); channel1 += a[(j*N + i)*3]; channel2 += a[(j*N + i)*3 + 1]; channel3 += a[(j*N + i)*3 + 2]; count++; } } } channel1 = channel1 / count; channel2 = channel2 / count; channel3 = channel3 / count; //printf("%f\n",channel1); b[(global_y * N + global_x)*3 ] = channel1; b[(global_y * N + global_x)*3 + 1] = channel2; b[(global_y * N + global_x)*3 + 2] = channel3; printf("%f\n",b[(global_y*N+global_x)*3]); } void handle_error(hipError_t error) { if (error != hipSuccess) { std::cout << "Cuda Error. 
Exiting..."; exit(0); } } void initialise_matrix(int M, int N, float A[]) { for(int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { for (int k = 0; k<3; k++) { A[(i*N + j )*3 + k ] = 1.0; } } } } void get_kernel(float K[][3]) { for(int i=0; i < 3; i++) { for(int j=0; j < 3;j++) { K[i][j] = 1.0/9.0; } } } void print(float a[]) { for(int k=0;k<3;k++) { for(int i=0;i<16;i++) { for(int j=0;j<16;j++) cout<<a[(i*16+j)*3+k]<< " " ; cout<<endl; } cout<<endl; } cout<<endl<<endl; } int main() { float image[16*16*3]; float result[16*16*3]; float kernel[3][3]; initialise_matrix(16,16,image); get_kernel(kernel); float *I, *R; size_t size = 16 * 16 * 3 * sizeof(float); handle_error(hipMalloc((void**) &I, size)); handle_error(hipMalloc((void**) &R, size)); hipMemcpy(I,image,size,hipMemcpyHostToDevice); dim3 grid_dim(1,1,1); dim3 block_dim(16,16,1); hipLaunchKernelGGL(( image_bluring), dim3(grid_dim), dim3(block_dim), 0, 0, I, R, 16, 16); hipMemcpy(result, R,size,hipMemcpyDeviceToHost); print(result); }
6ef62d45bcbcf884e209a452d32fbfd073b357f3.cu
#include <iostream> #include <assert.h> #include <cuda.h> #include <math.h> #include <bits/stdc++.h> using namespace std; #define isValid(X, Y) (X >= 0 && Y>=0 && X < M && Y < N) __global__ void image_bluring(float* a, float* b, int M, int N) { //__shared__ float[16][16][3]; int global_x = blockDim.x * blockIdx.x + threadIdx.x ; int global_y = blockDim.y * blockIdx.y + threadIdx.y ; float channel1 = 0, channel2 = 0, channel3 = 0; int count = 0; for(int i = global_x - 1; i <= global_x + 1; i++) { for(int j = global_y - 1; j <= global_y + 1; j++){ if(isValid(j,i)) { //printf("%f\n",a[(j*N+i)*3]); channel1 += a[(j*N + i)*3]; channel2 += a[(j*N + i)*3 + 1]; channel3 += a[(j*N + i)*3 + 2]; count++; } } } channel1 = channel1 / count; channel2 = channel2 / count; channel3 = channel3 / count; //printf("%f\n",channel1); b[(global_y * N + global_x)*3 ] = channel1; b[(global_y * N + global_x)*3 + 1] = channel2; b[(global_y * N + global_x)*3 + 2] = channel3; printf("%f\n",b[(global_y*N+global_x)*3]); } void handle_error(cudaError_t error) { if (error != cudaSuccess) { std::cout << "Cuda Error. 
Exiting..."; exit(0); } } void initialise_matrix(int M, int N, float A[]) { for(int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { for (int k = 0; k<3; k++) { A[(i*N + j )*3 + k ] = 1.0; } } } } void get_kernel(float K[][3]) { for(int i=0; i < 3; i++) { for(int j=0; j < 3;j++) { K[i][j] = 1.0/9.0; } } } void print(float a[]) { for(int k=0;k<3;k++) { for(int i=0;i<16;i++) { for(int j=0;j<16;j++) cout<<a[(i*16+j)*3+k]<< " " ; cout<<endl; } cout<<endl; } cout<<endl<<endl; } int main() { float image[16*16*3]; float result[16*16*3]; float kernel[3][3]; initialise_matrix(16,16,image); get_kernel(kernel); float *I, *R; size_t size = 16 * 16 * 3 * sizeof(float); handle_error(cudaMalloc((void**) &I, size)); handle_error(cudaMalloc((void**) &R, size)); cudaMemcpy(I,image,size,cudaMemcpyHostToDevice); dim3 grid_dim(1,1,1); dim3 block_dim(16,16,1); image_bluring<<<grid_dim, block_dim>>> (I, R, 16, 16); cudaMemcpy(result, R,size,cudaMemcpyDeviceToHost); print(result); }
84ed6aacdd12761ae18752ed84526f0a02bd9a2c.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <iostream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdio.h> using namespace cv; using namespace std; #define CHECK(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ cout<< "Error:" << hipGetErrorString(_m_cudaStat) \ << " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \ exit(1); \ } } __global__ void cudoCalculculate(unsigned char* image,int res){ int i = 3 * (threadIdx.x + blockIdx.x * blockDim.x); int j = 3 * (threadIdx.x + 1 + blockIdx.x * blockDim.x); if (j > res) return; image[i] = image[i + 1] = image[i + 2] = (sqrtf(image[j] - image[i]) + sqrtf(image[j+1] - image[i+1]) + sqrtf(image[j+2] - image[i+2] ))*20; } void CPU() { Mat image; image = cv::imread("pic.jpg", cv::IMREAD_COLOR); // Read the file CV_LOAD_IMAGE_COLOR if (!image.data) // Check for invalid input { cout << "Could not open or find the image" << std::endl; return; } Mat result = image.clone(); clock_t start = clock(); for (int i = 0; i < image.rows - 1; i++) { //pointer to 1st pixel in row Vec3b* p = image.ptr<Vec3b>(i); Vec3b* p1 = image.ptr<Vec3b>(i + 1); Vec3b* p_r = result.ptr<Vec3b>(i); for (int j = 0; j < image.cols - 1; j++) //for (int ch = 0; ch < 3; ch++) p_r[j][0] = p_r[j][1] = p_r[j][2] = sqrt(pow(p[j + 1][0] - p[j][0], 2) + pow(p[j + 1][1] - p[j][1], 2) + pow(p[j + 1][2] - p[j][2], 2) + pow(p1[j][0] - p[j][0], 2) + pow(p1[j][1] - p[j][1], 2) + pow(p1[j][2] - p[j][2], 2) ) * 20; } clock_t end = clock(); double seconds = (double)(end - start) / CLOCKS_PER_SEC; cout << "CPU time: " << seconds * 1000 << "ms" << endl; imwrite("pic2.jpg", result); //show image namedWindow("Display window", WINDOW_AUTOSIZE);// Create a window for display. imshow("Display window", result); // Show our image inside it. 
cv::waitKey(0);// Wait for a keystroke in the window } void GPU() { Mat image; image = cv::imread("pic.jpg", cv::IMREAD_COLOR); // Read the file CV_LOAD_IMAGE_COLOR if (!image.data) // Check for invalid input { cout << "Could not open or find the image" << std::endl; return; } unsigned char* imageGray; int full_size_image = image.rows * image.cols * 3; hipEvent_t startCUDA, stopCUDA; float elapsedTimeCUDA; hipEventCreate(&startCUDA); hipEventCreate(&stopCUDA); CHECK(hipMalloc(&imageGray, full_size_image)); CHECK(hipMemcpy(imageGray, image.data, full_size_image, hipMemcpyHostToDevice)); hipEventRecord(startCUDA, 0); hipLaunchKernelGGL(( cudoCalculculate) , dim3((full_size_image / 3 + 255) / 256), dim3(256) , 0, 0, imageGray, full_size_image); hipEventRecord(stopCUDA, 0); hipEventSynchronize(stopCUDA); CHECK(hipGetLastError()); hipEventElapsedTime(&elapsedTimeCUDA, startCUDA, stopCUDA); cout << "CUDA sum time = " << elapsedTimeCUDA << " ms\n"; cout << "CUDA memory throughput = " << 3 * full_size_image * sizeof(float) / elapsedTimeCUDA / 1024 / 1024 / 1.024 << " Gb/s\n"; CHECK(hipMemcpy(image.data, imageGray, full_size_image, hipMemcpyDeviceToHost)); CHECK(hipFree(imageGray)); imwrite("pic2GPU.jpg", image); namedWindow("Display window", WINDOW_AUTOSIZE);// Create a window for display. imshow("Display window", image); // Show our image inside it. waitKey(0); } int main( int argc, char** argv ) { GPU(); CPU(); return 0; }
84ed6aacdd12761ae18752ed84526f0a02bd9a2c.cu
#include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdio.h> using namespace cv; using namespace std; #define CHECK(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ cout<< "Error:" << cudaGetErrorString(_m_cudaStat) \ << " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \ exit(1); \ } } __global__ void cudoCalculculate(unsigned char* image,int res){ int i = 3 * (threadIdx.x + blockIdx.x * blockDim.x); int j = 3 * (threadIdx.x + 1 + blockIdx.x * blockDim.x); if (j > res) return; image[i] = image[i + 1] = image[i + 2] = (sqrtf(image[j] - image[i]) + sqrtf(image[j+1] - image[i+1]) + sqrtf(image[j+2] - image[i+2] ))*20; } void CPU() { Mat image; image = cv::imread("pic.jpg", cv::IMREAD_COLOR); // Read the file CV_LOAD_IMAGE_COLOR if (!image.data) // Check for invalid input { cout << "Could not open or find the image" << std::endl; return; } Mat result = image.clone(); clock_t start = clock(); for (int i = 0; i < image.rows - 1; i++) { //pointer to 1st pixel in row Vec3b* p = image.ptr<Vec3b>(i); Vec3b* p1 = image.ptr<Vec3b>(i + 1); Vec3b* p_r = result.ptr<Vec3b>(i); for (int j = 0; j < image.cols - 1; j++) //for (int ch = 0; ch < 3; ch++) p_r[j][0] = p_r[j][1] = p_r[j][2] = sqrt(pow(p[j + 1][0] - p[j][0], 2) + pow(p[j + 1][1] - p[j][1], 2) + pow(p[j + 1][2] - p[j][2], 2) + pow(p1[j][0] - p[j][0], 2) + pow(p1[j][1] - p[j][1], 2) + pow(p1[j][2] - p[j][2], 2) ) * 20; } clock_t end = clock(); double seconds = (double)(end - start) / CLOCKS_PER_SEC; cout << "CPU time: " << seconds * 1000 << "ms" << endl; imwrite("pic2.jpg", result); //show image namedWindow("Display window", WINDOW_AUTOSIZE);// Create a window for display. imshow("Display window", result); // Show our image inside it. 
cv::waitKey(0);// Wait for a keystroke in the window } void GPU() { Mat image; image = cv::imread("pic.jpg", cv::IMREAD_COLOR); // Read the file CV_LOAD_IMAGE_COLOR if (!image.data) // Check for invalid input { cout << "Could not open or find the image" << std::endl; return; } unsigned char* imageGray; int full_size_image = image.rows * image.cols * 3; cudaEvent_t startCUDA, stopCUDA; float elapsedTimeCUDA; cudaEventCreate(&startCUDA); cudaEventCreate(&stopCUDA); CHECK(cudaMalloc(&imageGray, full_size_image)); CHECK(cudaMemcpy(imageGray, image.data, full_size_image, cudaMemcpyHostToDevice)); cudaEventRecord(startCUDA, 0); cudoCalculculate <<<(full_size_image / 3 + 255) / 256, 256 >>> (imageGray, full_size_image); cudaEventRecord(stopCUDA, 0); cudaEventSynchronize(stopCUDA); CHECK(cudaGetLastError()); cudaEventElapsedTime(&elapsedTimeCUDA, startCUDA, stopCUDA); cout << "CUDA sum time = " << elapsedTimeCUDA << " ms\n"; cout << "CUDA memory throughput = " << 3 * full_size_image * sizeof(float) / elapsedTimeCUDA / 1024 / 1024 / 1.024 << " Gb/s\n"; CHECK(cudaMemcpy(image.data, imageGray, full_size_image, cudaMemcpyDeviceToHost)); CHECK(cudaFree(imageGray)); imwrite("pic2GPU.jpg", image); namedWindow("Display window", WINDOW_AUTOSIZE);// Create a window for display. imshow("Display window", image); // Show our image inside it. waitKey(0); } int main( int argc, char** argv ) { GPU(); CPU(); return 0; }
05cf1b780761bc096462914b9c7663a61ffa703d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdlib.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/sort.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include "NVStrings.h" #include "NVStringsImpl.h" #include "custring_view.cuh" #include "regex/regex.cuh" #include "regex/backref.h" #include "unicode/is_flags.h" #include "Timing.h" #include "util.h" // NVStrings* NVStrings::slice_replace( const char* repl, int start, int stop ) { if( !repl ) return 0; auto execpol = rmm::exec_policy(0); unsigned int replen = (unsigned int)strlen(repl); char* d_repl = 0; RMM_ALLOC(&d_repl,replen,0); hipMemcpy(d_repl,repl,replen,hipMemcpyHostToDevice); // compute size of output buffer unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<size_t> lengths(count,0); size_t* d_lengths = lengths.data().get(); double st1 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_repl, replen, start, stop, d_lengths] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; unsigned int len = 0; if( start < dstr->chars_count() ) len = 
dstr->replace_size((unsigned)start,(unsigned)(stop-start),d_repl,replen); else { // another odd pandas case: if out-of-bounds, just append int bytes = dstr->size() + replen; int nchars = dstr->chars_count() + custring_view::chars_in_string(d_repl,replen); len = custring_view::alloc_size(bytes,nchars); } len = ALIGN_SIZE(len); d_lengths[idx] = (size_t)len; }); // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths); if( d_buffer==0 ) { if( d_repl ) RMM_FREE(d_repl,0); return rtn; } double et1 = GetTime(); // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin()); // do the slice and replace custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); double st2 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count, [d_strings, d_repl, replen, start, stop, d_buffer, d_offsets, d_results] __device__(size_t idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; char* buffer = d_buffer + d_offsets[idx]; custring_view* dout = 0; if( start < dstr->chars_count() ) dout = dstr->replace((unsigned)start,(unsigned)(stop-start),d_repl,replen,buffer); else { // append for pandas consistency int bytes = dstr->size(); char* ptr = buffer; memcpy( ptr, dstr->data(), bytes ); ptr += bytes; memcpy( ptr, d_repl, replen ); bytes += replen; dout = custring_view::create_from(buffer,buffer,bytes); } d_results[idx] = dout; }); // hipError_t err = hipDeviceSynchronize(); double et2 = GetTime(); if( err != hipSuccess ) { fprintf(stderr,"nvs-slice_replace(%s,%d,%d)\n",repl,start,stop); printCudaError(err); } if( d_repl ) RMM_FREE(d_repl,0); pImpl->addOpTimes("slice_replace",(et1-st1),(et2-st2)); return rtn; } // this should replace multiple occurrences up to maxrepl NVStrings* NVStrings::replace( const char* str, const char* repl, int maxrepl ) { if( 
!str || !*str ) return 0; // null and empty string not allowed auto execpol = rmm::exec_policy(0); unsigned int ssz = (unsigned int)strlen(str); char* d_str = 0; RMM_ALLOC(&d_str,ssz,0); hipMemcpy(d_str,str,ssz,hipMemcpyHostToDevice); unsigned int sszch = custring_view::chars_in_string(str,ssz); if( !repl ) repl = ""; unsigned int rsz = (unsigned int)strlen(repl); char* d_repl = 0; RMM_ALLOC(&d_repl,rsz,0); hipMemcpy(d_repl,repl,rsz,hipMemcpyHostToDevice); unsigned int rszch = custring_view::chars_in_string(repl,rsz); // compute size of the output unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); double st1 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_str, ssz, sszch, d_repl, rsz, rszch, maxrepl, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int mxn = maxrepl; if( mxn < 0 ) mxn = dstr->chars_count(); //max possible replaces for this string unsigned int bytes = dstr->size(), nchars = dstr->chars_count(); int pos = dstr->find(d_str,ssz); // counting bytes and chars while((pos >= 0) && (mxn > 0)) { bytes += rsz - ssz; nchars += rszch - sszch; pos = dstr->find(d_str,ssz,(unsigned)pos+sszch); // next one --mxn; } unsigned int size = custring_view::alloc_size(bytes,nchars); d_sizes[idx] = ALIGN_SIZE(size); }); // // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) { RMM_FREE(d_str,0); RMM_FREE(d_repl,0); return rtn; // all strings are null } double et1 = GetTime(); // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // do the thing custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); double st2 = GetTime(); 
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_str, ssz, sszch, d_repl, rsz, d_buffer, d_offsets, maxrepl, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int mxn = maxrepl; if( mxn < 0 ) mxn = dstr->chars_count(); //max possible replaces for this string // char* buffer = d_buffer + d_offsets[idx]; char* sptr = dstr->data(); char* optr = buffer; unsigned int size = dstr->size(); int pos = dstr->find(d_str,ssz), lpos=0; while((pos >= 0) && (mxn > 0)) { // i:bbbbsssseeee int spos = dstr->byte_offset_for(pos); // ^ memcpy(optr,sptr+lpos,spos-lpos); // o:bbbb optr += spos - lpos; // ^ memcpy(optr,d_repl,rsz); // o:bbbbrrrr optr += rsz; // ^ lpos = spos + ssz; // i:bbbbsssseeee pos = dstr->find(d_str,ssz,pos+sszch); // ^ --mxn; } memcpy(optr,sptr+lpos,size-lpos); // o:bbbbrrrreeee unsigned int nsz = (unsigned int)(optr - buffer) + size - lpos; d_results[idx] = custring_view::create_from(buffer,buffer,nsz); }); // hipError_t err = hipDeviceSynchronize(); double et2 = GetTime(); if( err != hipSuccess ) { fprintf(stderr,"nvs-replace(%s,%s,%d)\n",str,repl,maxrepl); printCudaError(err); } pImpl->addOpTimes("replace",(et1-st1),(et2-st2)); RMM_FREE(d_str,0); RMM_FREE(d_repl,0); return rtn; } // same as above except parameter is regex NVStrings* NVStrings::replace_re( const char* pattern, const char* repl, int maxrepl ) { if( !pattern || !*pattern ) return 0; // null and empty string not allowed unsigned int count = size(); if( count==0 ) return new NVStrings(count); auto execpol = rmm::exec_policy(0); // compile regex into device object const char32_t* ptn32 = to_char32(pattern); dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count); delete ptn32; // // copy replace string to device memory if( !repl ) repl = ""; unsigned int rsz = (unsigned int)strlen(repl); char* d_repl = 0; RMM_ALLOC(&d_repl,rsz,0); hipMemcpy(d_repl,repl,rsz,hipMemcpyHostToDevice); 
unsigned int rszch = custring_view::chars_in_string(repl,rsz); // compute size of the output custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); double st1 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [prog, d_strings, d_repl, rsz, rszch, maxrepl, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int mxn = maxrepl; if( mxn < 0 ) mxn = dstr->chars_count(); //max possible replaces for this string unsigned int bytes = dstr->size(), nchars = dstr->chars_count(); int begin = 0, end = (int)nchars; int result = prog->find(idx,dstr,begin,end); while((result > 0) && (mxn > 0)) { bytes += rsz - (dstr->byte_offset_for(end)-dstr->byte_offset_for(begin)); nchars += rszch - (end-begin); begin = end; end = (int)nchars; result = prog->find(idx,dstr,begin,end); // next one --mxn; } unsigned int size = custring_view::alloc_size(bytes,nchars); d_sizes[idx] = ALIGN_SIZE(size); }); // // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) { dreprog::destroy(prog); RMM_FREE(d_repl,0); return rtn; // all strings are null } double et1 = GetTime(); // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // do the replace custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); double st2 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [prog, d_strings, d_repl, rsz, d_buffer, d_offsets, maxrepl, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int mxn = maxrepl; int nchars = (int)dstr->chars_count(); if( mxn < 0 ) mxn = nchars; //max possible replaces for this string char* buffer = 
d_buffer + d_offsets[idx]; // output buffer char* sptr = dstr->data(); // input buffer char* optr = buffer; // running output pointer unsigned int size = dstr->size(); // number of byte in input string int lpos = 0, begin = 0, end = nchars; // working vars // copy input to output replacing strings as we go int result = prog->find(idx,dstr,begin,end); while((result > 0) && (mxn > 0)) { // i:bbbbsssseeee int spos = dstr->byte_offset_for(begin); // ^ memcpy(optr,sptr+lpos,spos-lpos); // o:bbbb optr += spos - lpos; // ^ memcpy(optr,d_repl,rsz); // o:bbbbrrrr optr += rsz; // ^ lpos = dstr->byte_offset_for(end); // i:bbbbsssseeee begin = end; // ^ end = nchars; result = prog->find(idx,dstr,begin,end); --mxn; } // copy the rest: memcpy(optr,sptr+lpos,size-lpos); // o:bbbbrrrreeee unsigned int nsz = (unsigned int)(optr - buffer) + size - lpos; d_results[idx] = custring_view::create_from(buffer,buffer,nsz); }); // hipError_t err = hipDeviceSynchronize(); double et2 = GetTime(); if( err != hipSuccess ) { fprintf(stderr,"nvs-replace_re(%s,%s,%d)\n",pattern,repl,maxrepl); printCudaError(err); } pImpl->addOpTimes("replace_re",(et1-st1),(et2-st2)); // dreprog::destroy(prog); RMM_FREE(d_repl,0); return rtn; } // not even close to the others NVStrings* NVStrings::replace_with_backrefs( const char* pattern, const char* repl ) { if( !pattern || !*pattern ) return 0; // null and empty string not allowed unsigned int count = size(); if( count==0 || repl==0 ) return new NVStrings(count); // returns all nulls auto execpol = rmm::exec_policy(0); // compile regex into device object const char32_t* ptn32 = to_char32(pattern); dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count); delete ptn32; // // parse the repl string for backref indicators std::vector<thrust::pair<int,int> > brefs; std::string srepl = parse_backrefs(repl,brefs); unsigned int rsz = (unsigned int)srepl.size(); char* d_repl = 0; RMM_ALLOC(&d_repl,rsz,0); 
hipMemcpy(d_repl,srepl.c_str(),rsz,hipMemcpyHostToDevice); unsigned int rszch = custring_view::chars_in_string(srepl.c_str(),rsz); rmm::device_vector<thrust::pair<int,int> > dbrefs(brefs); auto d_brefs = dbrefs.data().get(); unsigned int refcount = (unsigned int)dbrefs.size(); // compute size of the output custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); double st1 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [prog, d_strings, rsz, rszch, d_brefs, refcount, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; unsigned int bytes = rsz, nchars = rszch; // start with template int begin = 0, end = (int)dstr->chars_count(); // constants if( prog->find(idx,dstr,begin,end) > 0 ) { for( unsigned int j=0; j < refcount; ++j ) // eval each ref { int refidx = d_brefs[j].first; // backref indicator int spos=begin, epos=end; // modified by extract if( (prog->extract(idx,dstr,spos,epos,refidx-1)<=0) || (epos <= spos) ) continue; // no value for this ref nchars += epos - spos; // add up chars spos = dstr->byte_offset_for(spos); // convert to bytes bytes += dstr->byte_offset_for(epos) - spos; // add up bytes } } unsigned int size = custring_view::alloc_size(bytes,nchars); d_sizes[idx] = ALIGN_SIZE(size); // new size for this string }); // // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) { dreprog::destroy(prog); RMM_FREE(d_repl,0); return rtn; // all strings are null } double et1 = GetTime(); // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // do the replace custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); double st2 = GetTime(); 
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [prog, d_strings, d_repl, rsz, d_offsets, d_brefs, refcount, d_buffer, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; // nulls create nulls a\1bc\2d char* buffer = d_buffer + d_offsets[idx]; // output buffer _________ char* optr = buffer; // running output pointer ^ char* sptr = d_repl; // input buffer abcd int lpos = 0, begin = 0, end = (int)dstr->chars_count(); // insert extracted strings left-to-right if( prog->find(idx,dstr,begin,end) > 0 ) { for( unsigned int j=0; j < refcount; ++j ) // eval each ref { int refidx = d_brefs[j].first; // backref indicator abcd abcd int ipos = d_brefs[j].second; // input position ^ ^ int len = ipos - lpos; // bytes to copy from input memcpy(optr,sptr,len); // copy left half a________ axxbc____ optr += len; // move output ptr ^ ^ sptr += len; // move input ptr abcd abcd lpos += len; // update last-position ^ ^ int spos=begin, epos=end; // these are modified by extract if( (prog->extract(idx,dstr,spos,epos,refidx-1)<=0) || (epos <= spos) ) // xx yyy continue; // no value for this ref spos = dstr->byte_offset_for(spos); // convert to bytes int bytes = dstr->byte_offset_for(epos) - spos; memcpy(optr,dstr->data()+spos,bytes); // axx______ axxbcyyy_ optr += bytes; // move output ptr ^ ^ } } if( lpos < rsz ) { // copy any remaining characters from input string memcpy(optr,sptr,rsz-lpos); // axxbcyyyd optr += rsz-lpos; // ^ } unsigned int nsz = (unsigned int)(optr - buffer); // compute output size d_results[idx] = custring_view::create_from(buffer,buffer,nsz); // new string }); // hipError_t err = hipDeviceSynchronize(); double et2 = GetTime(); if( err != hipSuccess ) { fprintf(stderr,"nvs-replace_with_backref(%s,%s)\n",pattern,repl); printCudaError(err); } pImpl->addOpTimes("replace_with_backref",(et1-st1),(et2-st2)); // dreprog::destroy(prog); RMM_FREE(d_repl,0); return rtn; } // NVStrings* 
NVStrings::translate( std::pair<unsigned,unsigned>* utable, unsigned int tableSize ) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); // convert unicode table into utf8 table thrust::host_vector< thrust::pair<Char,Char> > htable(tableSize); for( unsigned int idx=0; idx < tableSize; ++idx ) { htable[idx].first = u2u8(utable[idx].first); htable[idx].second = u2u8(utable[idx].second); } // could sort on the device; this table should not be very big thrust::sort(thrust::host, htable.begin(), htable.end(), [] __host__ (thrust::pair<Char,Char> p1, thrust::pair<Char,Char> p2) { return p1.first > p2.first; }); // copy translate table to device memory rmm::device_vector< thrust::pair<Char,Char> > table(htable); thrust::pair<Char,Char>* d_table = table.data().get(); // compute size of each new string rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); int tsize = tableSize; double st1 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_table, tsize, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; const char* sptr = dstr->data(); unsigned int bytes = dstr->size(); unsigned int nchars = dstr->chars_count(); for( unsigned int i=0; i < nchars; ++i ) { Char ch = dstr->at(i); Char nch = ch; for( int t=0; t < tsize; ++t ) // replace with faster lookup nch = ( ch==d_table[t].first ? d_table[t].second : nch ); int bic = custring_view::bytes_in_char(ch); int nbic = (nch ? 
custring_view::bytes_in_char(nch) : 0); bytes += nbic - bic; if( nch==0 ) --nchars; } unsigned int size = custring_view::alloc_size(bytes,nchars); d_sizes[idx] = ALIGN_SIZE(size); }); // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) return rtn; double et1 = GetTime(); // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // do the translate custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); double st2 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_buffer, d_offsets, d_table, tsize, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; char* buffer = d_buffer + d_offsets[idx]; const char* sptr = dstr->data(); unsigned int nchars = dstr->chars_count(); char* optr = buffer; int nsz = 0; for( unsigned int i=0; i < nchars; ++i ) { Char ch = 0; unsigned int cw = custring_view::char_to_Char(sptr,ch); Char nch = ch; for( int t=0; t < tsize; ++t ) // replace with faster lookup nch = ( ch==d_table[t].first ? d_table[t].second : nch ); sptr += cw; if( nch==0 ) continue; unsigned int nbic = custring_view::Char_to_char(nch,optr); optr += nbic; nsz += nbic; } d_results[idx] = custring_view::create_from(buffer,buffer,nsz); }); // hipError_t err = hipDeviceSynchronize(); double et2 = GetTime(); if( err != hipSuccess ) { fprintf(stderr,"nvs-translate(...,%d)\n",(int)tableSize); printCudaError(err); } pImpl->addOpTimes("translate",(et1-st1),(et2-st2)); return rtn; } // // This will create a new instance replacing any nulls with the provided string. // The parameter can be an empty string or any other string but not null. 
NVStrings* NVStrings::fillna( const char* str ) { if( str==0 ) return 0; auto execpol = rmm::exec_policy(0); unsigned int ssz = (unsigned int)strlen(str); unsigned int asz = custring_view::alloc_size(str,ssz); char* d_str = 0; RMM_ALLOC(&d_str,ssz+1,0); hipMemcpy(d_str,str,ssz+1,hipMemcpyHostToDevice); // compute size of the output unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_str, asz, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; unsigned int size = asz; if( dstr ) size = dstr->alloc_size(); d_sizes[idx] = ALIGN_SIZE(size); }); // NVStrings* rtn = new NVStrings(count); // create output object char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); rmm::device_vector<size_t> offsets(count,0); // create offsets thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // do the thing custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_str, ssz, d_buffer, d_offsets, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; char* buffer = d_buffer + d_offsets[idx]; if( dstr ) dstr = custring_view::create_from(buffer,*dstr); else dstr = custring_view::create_from(buffer,d_str,ssz); d_results[idx] = dstr; }); // hipError_t err = hipDeviceSynchronize(); if( err != hipSuccess ) { fprintf(stderr,"nvs-fillna(%s)\n",str); printCudaError(err); } RMM_FREE(d_str,0); return rtn; }
05cf1b780761bc096462914b9c7663a61ffa703d.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdlib.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/sort.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include "NVStrings.h" #include "NVStringsImpl.h" #include "custring_view.cuh" #include "regex/regex.cuh" #include "regex/backref.h" #include "unicode/is_flags.h" #include "Timing.h" #include "util.h" // NVStrings* NVStrings::slice_replace( const char* repl, int start, int stop ) { if( !repl ) return 0; auto execpol = rmm::exec_policy(0); unsigned int replen = (unsigned int)strlen(repl); char* d_repl = 0; RMM_ALLOC(&d_repl,replen,0); cudaMemcpy(d_repl,repl,replen,cudaMemcpyHostToDevice); // compute size of output buffer unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<size_t> lengths(count,0); size_t* d_lengths = lengths.data().get(); double st1 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_repl, replen, start, stop, d_lengths] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; unsigned int len = 0; if( start < dstr->chars_count() ) len = 
dstr->replace_size((unsigned)start,(unsigned)(stop-start),d_repl,replen); else { // another odd pandas case: if out-of-bounds, just append int bytes = dstr->size() + replen; int nchars = dstr->chars_count() + custring_view::chars_in_string(d_repl,replen); len = custring_view::alloc_size(bytes,nchars); } len = ALIGN_SIZE(len); d_lengths[idx] = (size_t)len; }); // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths); if( d_buffer==0 ) { if( d_repl ) RMM_FREE(d_repl,0); return rtn; } double et1 = GetTime(); // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin()); // do the slice and replace custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); double st2 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count, [d_strings, d_repl, replen, start, stop, d_buffer, d_offsets, d_results] __device__(size_t idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; char* buffer = d_buffer + d_offsets[idx]; custring_view* dout = 0; if( start < dstr->chars_count() ) dout = dstr->replace((unsigned)start,(unsigned)(stop-start),d_repl,replen,buffer); else { // append for pandas consistency int bytes = dstr->size(); char* ptr = buffer; memcpy( ptr, dstr->data(), bytes ); ptr += bytes; memcpy( ptr, d_repl, replen ); bytes += replen; dout = custring_view::create_from(buffer,buffer,bytes); } d_results[idx] = dout; }); // cudaError_t err = cudaDeviceSynchronize(); double et2 = GetTime(); if( err != cudaSuccess ) { fprintf(stderr,"nvs-slice_replace(%s,%d,%d)\n",repl,start,stop); printCudaError(err); } if( d_repl ) RMM_FREE(d_repl,0); pImpl->addOpTimes("slice_replace",(et1-st1),(et2-st2)); return rtn; } // this should replace multiple occurrences up to maxrepl NVStrings* NVStrings::replace( const char* str, const char* repl, int maxrepl ) { 
if( !str || !*str ) return 0; // null and empty string not allowed auto execpol = rmm::exec_policy(0); unsigned int ssz = (unsigned int)strlen(str); char* d_str = 0; RMM_ALLOC(&d_str,ssz,0); cudaMemcpy(d_str,str,ssz,cudaMemcpyHostToDevice); unsigned int sszch = custring_view::chars_in_string(str,ssz); if( !repl ) repl = ""; unsigned int rsz = (unsigned int)strlen(repl); char* d_repl = 0; RMM_ALLOC(&d_repl,rsz,0); cudaMemcpy(d_repl,repl,rsz,cudaMemcpyHostToDevice); unsigned int rszch = custring_view::chars_in_string(repl,rsz); // compute size of the output unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); double st1 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_str, ssz, sszch, d_repl, rsz, rszch, maxrepl, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int mxn = maxrepl; if( mxn < 0 ) mxn = dstr->chars_count(); //max possible replaces for this string unsigned int bytes = dstr->size(), nchars = dstr->chars_count(); int pos = dstr->find(d_str,ssz); // counting bytes and chars while((pos >= 0) && (mxn > 0)) { bytes += rsz - ssz; nchars += rszch - sszch; pos = dstr->find(d_str,ssz,(unsigned)pos+sszch); // next one --mxn; } unsigned int size = custring_view::alloc_size(bytes,nchars); d_sizes[idx] = ALIGN_SIZE(size); }); // // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) { RMM_FREE(d_str,0); RMM_FREE(d_repl,0); return rtn; // all strings are null } double et1 = GetTime(); // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // do the thing custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); double st2 = GetTime(); 
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_str, ssz, sszch, d_repl, rsz, d_buffer, d_offsets, maxrepl, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int mxn = maxrepl; if( mxn < 0 ) mxn = dstr->chars_count(); //max possible replaces for this string // char* buffer = d_buffer + d_offsets[idx]; char* sptr = dstr->data(); char* optr = buffer; unsigned int size = dstr->size(); int pos = dstr->find(d_str,ssz), lpos=0; while((pos >= 0) && (mxn > 0)) { // i:bbbbsssseeee int spos = dstr->byte_offset_for(pos); // ^ memcpy(optr,sptr+lpos,spos-lpos); // o:bbbb optr += spos - lpos; // ^ memcpy(optr,d_repl,rsz); // o:bbbbrrrr optr += rsz; // ^ lpos = spos + ssz; // i:bbbbsssseeee pos = dstr->find(d_str,ssz,pos+sszch); // ^ --mxn; } memcpy(optr,sptr+lpos,size-lpos); // o:bbbbrrrreeee unsigned int nsz = (unsigned int)(optr - buffer) + size - lpos; d_results[idx] = custring_view::create_from(buffer,buffer,nsz); }); // cudaError_t err = cudaDeviceSynchronize(); double et2 = GetTime(); if( err != cudaSuccess ) { fprintf(stderr,"nvs-replace(%s,%s,%d)\n",str,repl,maxrepl); printCudaError(err); } pImpl->addOpTimes("replace",(et1-st1),(et2-st2)); RMM_FREE(d_str,0); RMM_FREE(d_repl,0); return rtn; } // same as above except parameter is regex NVStrings* NVStrings::replace_re( const char* pattern, const char* repl, int maxrepl ) { if( !pattern || !*pattern ) return 0; // null and empty string not allowed unsigned int count = size(); if( count==0 ) return new NVStrings(count); auto execpol = rmm::exec_policy(0); // compile regex into device object const char32_t* ptn32 = to_char32(pattern); dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count); delete ptn32; // // copy replace string to device memory if( !repl ) repl = ""; unsigned int rsz = (unsigned int)strlen(repl); char* d_repl = 0; RMM_ALLOC(&d_repl,rsz,0); 
cudaMemcpy(d_repl,repl,rsz,cudaMemcpyHostToDevice); unsigned int rszch = custring_view::chars_in_string(repl,rsz); // compute size of the output custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); double st1 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [prog, d_strings, d_repl, rsz, rszch, maxrepl, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int mxn = maxrepl; if( mxn < 0 ) mxn = dstr->chars_count(); //max possible replaces for this string unsigned int bytes = dstr->size(), nchars = dstr->chars_count(); int begin = 0, end = (int)nchars; int result = prog->find(idx,dstr,begin,end); while((result > 0) && (mxn > 0)) { bytes += rsz - (dstr->byte_offset_for(end)-dstr->byte_offset_for(begin)); nchars += rszch - (end-begin); begin = end; end = (int)nchars; result = prog->find(idx,dstr,begin,end); // next one --mxn; } unsigned int size = custring_view::alloc_size(bytes,nchars); d_sizes[idx] = ALIGN_SIZE(size); }); // // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) { dreprog::destroy(prog); RMM_FREE(d_repl,0); return rtn; // all strings are null } double et1 = GetTime(); // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // do the replace custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); double st2 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [prog, d_strings, d_repl, rsz, d_buffer, d_offsets, maxrepl, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int mxn = maxrepl; int nchars = (int)dstr->chars_count(); if( mxn < 0 ) mxn = nchars; //max 
possible replaces for this string char* buffer = d_buffer + d_offsets[idx]; // output buffer char* sptr = dstr->data(); // input buffer char* optr = buffer; // running output pointer unsigned int size = dstr->size(); // number of byte in input string int lpos = 0, begin = 0, end = nchars; // working vars // copy input to output replacing strings as we go int result = prog->find(idx,dstr,begin,end); while((result > 0) && (mxn > 0)) { // i:bbbbsssseeee int spos = dstr->byte_offset_for(begin); // ^ memcpy(optr,sptr+lpos,spos-lpos); // o:bbbb optr += spos - lpos; // ^ memcpy(optr,d_repl,rsz); // o:bbbbrrrr optr += rsz; // ^ lpos = dstr->byte_offset_for(end); // i:bbbbsssseeee begin = end; // ^ end = nchars; result = prog->find(idx,dstr,begin,end); --mxn; } // copy the rest: memcpy(optr,sptr+lpos,size-lpos); // o:bbbbrrrreeee unsigned int nsz = (unsigned int)(optr - buffer) + size - lpos; d_results[idx] = custring_view::create_from(buffer,buffer,nsz); }); // cudaError_t err = cudaDeviceSynchronize(); double et2 = GetTime(); if( err != cudaSuccess ) { fprintf(stderr,"nvs-replace_re(%s,%s,%d)\n",pattern,repl,maxrepl); printCudaError(err); } pImpl->addOpTimes("replace_re",(et1-st1),(et2-st2)); // dreprog::destroy(prog); RMM_FREE(d_repl,0); return rtn; } // not even close to the others NVStrings* NVStrings::replace_with_backrefs( const char* pattern, const char* repl ) { if( !pattern || !*pattern ) return 0; // null and empty string not allowed unsigned int count = size(); if( count==0 || repl==0 ) return new NVStrings(count); // returns all nulls auto execpol = rmm::exec_policy(0); // compile regex into device object const char32_t* ptn32 = to_char32(pattern); dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count); delete ptn32; // // parse the repl string for backref indicators std::vector<thrust::pair<int,int> > brefs; std::string srepl = parse_backrefs(repl,brefs); unsigned int rsz = (unsigned int)srepl.size(); char* d_repl = 0; RMM_ALLOC(&d_repl,rsz,0); 
cudaMemcpy(d_repl,srepl.c_str(),rsz,cudaMemcpyHostToDevice); unsigned int rszch = custring_view::chars_in_string(srepl.c_str(),rsz); rmm::device_vector<thrust::pair<int,int> > dbrefs(brefs); auto d_brefs = dbrefs.data().get(); unsigned int refcount = (unsigned int)dbrefs.size(); // compute size of the output custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); double st1 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [prog, d_strings, rsz, rszch, d_brefs, refcount, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; unsigned int bytes = rsz, nchars = rszch; // start with template int begin = 0, end = (int)dstr->chars_count(); // constants if( prog->find(idx,dstr,begin,end) > 0 ) { for( unsigned int j=0; j < refcount; ++j ) // eval each ref { int refidx = d_brefs[j].first; // backref indicator int spos=begin, epos=end; // modified by extract if( (prog->extract(idx,dstr,spos,epos,refidx-1)<=0) || (epos <= spos) ) continue; // no value for this ref nchars += epos - spos; // add up chars spos = dstr->byte_offset_for(spos); // convert to bytes bytes += dstr->byte_offset_for(epos) - spos; // add up bytes } } unsigned int size = custring_view::alloc_size(bytes,nchars); d_sizes[idx] = ALIGN_SIZE(size); // new size for this string }); // // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) { dreprog::destroy(prog); RMM_FREE(d_repl,0); return rtn; // all strings are null } double et1 = GetTime(); // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // do the replace custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); double st2 = GetTime(); 
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [prog, d_strings, d_repl, rsz, d_offsets, d_brefs, refcount, d_buffer, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; // nulls create nulls a\1bc\2d char* buffer = d_buffer + d_offsets[idx]; // output buffer _________ char* optr = buffer; // running output pointer ^ char* sptr = d_repl; // input buffer abcd int lpos = 0, begin = 0, end = (int)dstr->chars_count(); // insert extracted strings left-to-right if( prog->find(idx,dstr,begin,end) > 0 ) { for( unsigned int j=0; j < refcount; ++j ) // eval each ref { int refidx = d_brefs[j].first; // backref indicator abcd abcd int ipos = d_brefs[j].second; // input position ^ ^ int len = ipos - lpos; // bytes to copy from input memcpy(optr,sptr,len); // copy left half a________ axxbc____ optr += len; // move output ptr ^ ^ sptr += len; // move input ptr abcd abcd lpos += len; // update last-position ^ ^ int spos=begin, epos=end; // these are modified by extract if( (prog->extract(idx,dstr,spos,epos,refidx-1)<=0) || (epos <= spos) ) // xx yyy continue; // no value for this ref spos = dstr->byte_offset_for(spos); // convert to bytes int bytes = dstr->byte_offset_for(epos) - spos; memcpy(optr,dstr->data()+spos,bytes); // axx______ axxbcyyy_ optr += bytes; // move output ptr ^ ^ } } if( lpos < rsz ) { // copy any remaining characters from input string memcpy(optr,sptr,rsz-lpos); // axxbcyyyd optr += rsz-lpos; // ^ } unsigned int nsz = (unsigned int)(optr - buffer); // compute output size d_results[idx] = custring_view::create_from(buffer,buffer,nsz); // new string }); // cudaError_t err = cudaDeviceSynchronize(); double et2 = GetTime(); if( err != cudaSuccess ) { fprintf(stderr,"nvs-replace_with_backref(%s,%s)\n",pattern,repl); printCudaError(err); } pImpl->addOpTimes("replace_with_backref",(et1-st1),(et2-st2)); // dreprog::destroy(prog); RMM_FREE(d_repl,0); return rtn; } // NVStrings* 
NVStrings::translate( std::pair<unsigned,unsigned>* utable, unsigned int tableSize ) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); // convert unicode table into utf8 table thrust::host_vector< thrust::pair<Char,Char> > htable(tableSize); for( unsigned int idx=0; idx < tableSize; ++idx ) { htable[idx].first = u2u8(utable[idx].first); htable[idx].second = u2u8(utable[idx].second); } // could sort on the device; this table should not be very big thrust::sort(thrust::host, htable.begin(), htable.end(), [] __host__ (thrust::pair<Char,Char> p1, thrust::pair<Char,Char> p2) { return p1.first > p2.first; }); // copy translate table to device memory rmm::device_vector< thrust::pair<Char,Char> > table(htable); thrust::pair<Char,Char>* d_table = table.data().get(); // compute size of each new string rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); int tsize = tableSize; double st1 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_table, tsize, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; const char* sptr = dstr->data(); unsigned int bytes = dstr->size(); unsigned int nchars = dstr->chars_count(); for( unsigned int i=0; i < nchars; ++i ) { Char ch = dstr->at(i); Char nch = ch; for( int t=0; t < tsize; ++t ) // replace with faster lookup nch = ( ch==d_table[t].first ? d_table[t].second : nch ); int bic = custring_view::bytes_in_char(ch); int nbic = (nch ? 
custring_view::bytes_in_char(nch) : 0); bytes += nbic - bic; if( nch==0 ) --nchars; } unsigned int size = custring_view::alloc_size(bytes,nchars); d_sizes[idx] = ALIGN_SIZE(size); }); // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) return rtn; double et1 = GetTime(); // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // do the translate custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); double st2 = GetTime(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_buffer, d_offsets, d_table, tsize, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; char* buffer = d_buffer + d_offsets[idx]; const char* sptr = dstr->data(); unsigned int nchars = dstr->chars_count(); char* optr = buffer; int nsz = 0; for( unsigned int i=0; i < nchars; ++i ) { Char ch = 0; unsigned int cw = custring_view::char_to_Char(sptr,ch); Char nch = ch; for( int t=0; t < tsize; ++t ) // replace with faster lookup nch = ( ch==d_table[t].first ? d_table[t].second : nch ); sptr += cw; if( nch==0 ) continue; unsigned int nbic = custring_view::Char_to_char(nch,optr); optr += nbic; nsz += nbic; } d_results[idx] = custring_view::create_from(buffer,buffer,nsz); }); // cudaError_t err = cudaDeviceSynchronize(); double et2 = GetTime(); if( err != cudaSuccess ) { fprintf(stderr,"nvs-translate(...,%d)\n",(int)tableSize); printCudaError(err); } pImpl->addOpTimes("translate",(et1-st1),(et2-st2)); return rtn; } // // This will create a new instance replacing any nulls with the provided string. // The parameter can be an empty string or any other string but not null. 
// Returns a new NVStrings instance in which every null entry has been
// replaced by the given string; non-null entries are copied unchanged.
// The replacement may be an empty string but must not itself be null
// (a null parameter returns 0).
NVStrings* NVStrings::fillna( const char* str )
{
    if( str==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    // copy the replacement string (including its terminator) to device memory
    unsigned int ssz = (unsigned int)strlen(str);
    unsigned int asz = custring_view::alloc_size(str,ssz);
    char* d_str = 0;
    RMM_ALLOC(&d_str,ssz+1,0);
    cudaMemcpy(d_str,str,ssz+1,cudaMemcpyHostToDevice);
    // compute size of the output: null entries take the replacement's
    // allocation size, every other entry keeps its current size
    unsigned int count = size();
    custring_view** d_strings = pImpl->getStringsPtr();
    rmm::device_vector<size_t> sizes(count,0);
    size_t* d_sizes = sizes.data().get();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_str, asz, d_sizes] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            unsigned int size = asz;
            if( dstr )
                size = dstr->alloc_size();
            d_sizes[idx] = ALIGN_SIZE(size);
        });
    //
    NVStrings* rtn = new NVStrings(count); // create output object
    // NOTE(review): unlike sibling methods, d_buffer is not checked for 0
    // here; every per-string size above is non-zero, so this should only
    // matter when count==0 — confirm createMemoryFor handles that case.
    char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
    rmm::device_vector<size_t> offsets(count,0); // create offsets
    thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
    // build each output string: copy the original where present, or
    // create one from the replacement where the entry was null
    custring_view_array d_results = rtn->pImpl->getStringsPtr();
    size_t* d_offsets = offsets.data().get();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_str, ssz, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            char* buffer = d_buffer + d_offsets[idx];
            if( dstr )
                dstr = custring_view::create_from(buffer,*dstr);
            else
                dstr = custring_view::create_from(buffer,d_str,ssz);
            d_results[idx] = dstr;
        });
    //
    // surface any asynchronous kernel failure before freeing resources
    cudaError_t err = cudaDeviceSynchronize();
    if( err != cudaSuccess )
    {
        fprintf(stderr,"nvs-fillna(%s)\n",str);
        printCudaError(err);
    }
    RMM_FREE(d_str,0);
    return rtn;
}
57650ff7579eea0724919bbc42d652bf63f66f7f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <fstream>   // ifstream/ofstream used below (previously pulled in indirectly)
#include <iomanip>   // setprecision
#include <iostream>
#include "timer.h"

using namespace std;

/* Utility function, use to do error checking.

   Use this function like this:

   checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));

   And to check the result of a kernel invocation:

   checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
    if (result != hipSuccess) {
        cerr << "cuda error: " << hipGetErrorString(result) << endl;
        exit(1);
    }
}

// Returns the size of the named file in bytes, or -1 if it cannot be opened.
int fileSize(char *fileName) {
    int size;
    ifstream file (fileName, ios::in|ios::binary|ios::ate);
    if (file.is_open())
    {
        size = file.tellg();
        file.close();
    }
    else {
        cout << "Unable to open file";
        size = -1;
    }
    return size;
}

// Reads the entire contents of the named file into 'data'.
// Caller must have allocated at least fileSize(fileName) bytes.
int readData(char *fileName, char *data) {
    streampos size;
    ifstream file (fileName, ios::in|ios::binary|ios::ate);
    if (file.is_open())
    {
        size = file.tellg();
        file.seekg (0, ios::beg);
        file.read (data, size);
        file.close();
        cout << "The entire file content is in memory." << endl;
    }
    else
        cout << "Unable to open file" << endl;
    return 0;
}

// Writes 'size' bytes from 'data' to the named file; returns 0 on success,
// -1 if the file cannot be opened.
int writeData(int size, char *fileName, char *data) {
    ofstream file (fileName, ios::out|ios::binary|ios::trunc);
    if (file.is_open())
    {
        file.write (data, size);
        file.close();
        cout << "The entire file content was written to file." << endl;
        return 0;
    }
    else
        cout << "Unable to open file";
    return -1;
}

// Checksum kernel: currently a stub that only derives the global index.
// Launched 1-D; callers must supply enough blocks to cover all n elements.
__global__ void checksumKernel(unsigned int *deviceDataIn) {
    unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
    (void)index;          // silence unused warnings until the checksum
    (void)deviceDataIn;   // computation is implemented
}

// Sequential (CPU) reference checksum; the computation itself is still a
// stub — only the timing scaffold is in place.
unsigned int checksumSeq (int n, unsigned int* data_in) {
    int i;
    timer sequentialTime = timer("Sequential checksum");
    (void)data_in;   // unused until the checksum is implemented

    sequentialTime.start();
    for (i=0; i<n; i++) {}   // checksum body not yet implemented
    sequentialTime.stop();

    cout << fixed << setprecision(6);
    cout << "Checksum (sequential): \t\t" << sequentialTime.getElapsed()
         << " seconds." << endl;
    return 0;
}

/**
 * The checksumCuda handler that initialises the arrays to be used and calls
 * the checksum kernel. It also computes the missing values not calculated
 * on the GPU. It then adds all values together and prints the checksum.
 */
unsigned int checksumCuda (int n, unsigned int* data_in) {
    int threadBlockSize = 512;
    // Round up so the final partial block is still launched; the old
    // n/threadBlockSize truncated, dropping tail elements and launching
    // zero blocks whenever n < threadBlockSize.
    int numBlocks = (n + threadBlockSize - 1) / threadBlockSize;

    /* allocate the vectors on the GPU */
    unsigned int* deviceDataIn = NULL;
    checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(unsigned int)));
    if (deviceDataIn == NULL) {
        cout << "could not allocate memory!" << endl;
        exit(1);
    }

    timer kernelTime = timer("kernelTime");
    timer memoryTime = timer("memoryTime");

    /* copy the original vectors to the GPU */
    memoryTime.start();
    checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(unsigned int), hipMemcpyHostToDevice));
    memoryTime.stop();

    kernelTime.start();
    hipLaunchKernelGGL(( checksumKernel), dim3(numBlocks), dim3(threadBlockSize), 0, 0, deviceDataIn);
    hipDeviceSynchronize();
    kernelTime.stop();

    // Copies back the correct data
    checkCudaCall(hipMemcpy(data_in, deviceDataIn, n*sizeof(unsigned int), hipMemcpyDeviceToHost));

    /* check whether the kernel invocation was successful */
    checkCudaCall(hipGetLastError());

    /* free device memory */
    checkCudaCall(hipFree(deviceDataIn));

    /* The times are printed */
    cout << fixed << setprecision(6);
    cout << "Kernel: \t\t" << kernelTime.getElapsed() << " seconds." << endl;
    cout << "Memory: \t\t" << memoryTime.getElapsed() << " seconds." << endl;
    return 0;
}

int main(int argc, char* argv[]) {
    int n;
    int seq;
    char *fileName;

    // Arg parse
    if (argc == 3) {
        fileName = (char*)argv[2];
        seq = atoi(argv[1]);
        printf("Chosen for option: %d\n", seq);
        printf("opening file %s\n", fileName);
    } else {
        printf("non valid options\n");
        return EXIT_FAILURE;
    }

    n = fileSize(fileName);
    if (n == -1) {
        printf("file not found\n");
        return EXIT_FAILURE;   // was exit(0): a missing file must not report success
    }

    char* data_in = new char[n];
    readData(fileName, data_in);

    // Widen bytes to unsigned ints. Cast through unsigned char so bytes
    // >= 0x80 do not sign-extend into huge values on platforms where
    // plain char is signed.
    unsigned int *data_in_raw = new unsigned int[n];
    for (int i = 0; i < n; i++){
        data_in_raw[i] = (unsigned char)data_in[i];
    }

    /* Check the option to determine the functions to be called */
    if (seq == 1){
        // Only sequential checksum is run
        unsigned int checksum = checksumSeq(n, data_in_raw);
        printf("Sequental checksum %u\n", checksum);
    } else if (seq == 0) {
        // Only cuda checksum is run
        unsigned int checksum = checksumCuda(n, data_in_raw);
        printf("Cuda checksum %u\n", checksum);
    } else if (seq == 2){
        // Both the sequential and the cuda checksum are run
        unsigned int checksum = checksumCuda(n, data_in_raw);
        printf("Cuda checksum %u\n", checksum);
        checksum = checksumSeq(n, data_in_raw);
        printf("Sequental checksum %u\n", checksum);
    }

    delete[] data_in;
    delete[] data_in_raw;   // was leaked
    return 0;
}
57650ff7579eea0724919bbc42d652bf63f66f7f.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <fstream>   // ifstream/ofstream used below (previously pulled in indirectly)
#include <iomanip>   // setprecision
#include <iostream>
#include "timer.h"

using namespace std;

/* Utility function, use to do error checking.

   Use this function like this:

   checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));

   And to check the result of a kernel invocation:

   checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
    if (result != cudaSuccess) {
        cerr << "cuda error: " << cudaGetErrorString(result) << endl;
        exit(1);
    }
}

// Returns the size of the named file in bytes, or -1 if it cannot be opened.
int fileSize(char *fileName) {
    int size;
    ifstream file (fileName, ios::in|ios::binary|ios::ate);
    if (file.is_open())
    {
        size = file.tellg();
        file.close();
    }
    else {
        cout << "Unable to open file";
        size = -1;
    }
    return size;
}

// Reads the entire contents of the named file into 'data'.
// Caller must have allocated at least fileSize(fileName) bytes.
int readData(char *fileName, char *data) {
    streampos size;
    ifstream file (fileName, ios::in|ios::binary|ios::ate);
    if (file.is_open())
    {
        size = file.tellg();
        file.seekg (0, ios::beg);
        file.read (data, size);
        file.close();
        cout << "The entire file content is in memory." << endl;
    }
    else
        cout << "Unable to open file" << endl;
    return 0;
}

// Writes 'size' bytes from 'data' to the named file; returns 0 on success,
// -1 if the file cannot be opened.
int writeData(int size, char *fileName, char *data) {
    ofstream file (fileName, ios::out|ios::binary|ios::trunc);
    if (file.is_open())
    {
        file.write (data, size);
        file.close();
        cout << "The entire file content was written to file." << endl;
        return 0;
    }
    else
        cout << "Unable to open file";
    return -1;
}

// Checksum kernel: currently a stub that only derives the global index.
// Launched 1-D; callers must supply enough blocks to cover all n elements.
__global__ void checksumKernel(unsigned int *deviceDataIn) {
    unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
    (void)index;          // silence unused warnings until the checksum
    (void)deviceDataIn;   // computation is implemented
}

// Sequential (CPU) reference checksum; the computation itself is still a
// stub — only the timing scaffold is in place.
unsigned int checksumSeq (int n, unsigned int* data_in) {
    int i;
    timer sequentialTime = timer("Sequential checksum");
    (void)data_in;   // unused until the checksum is implemented

    sequentialTime.start();
    for (i=0; i<n; i++) {}   // checksum body not yet implemented
    sequentialTime.stop();

    cout << fixed << setprecision(6);
    cout << "Checksum (sequential): \t\t" << sequentialTime.getElapsed()
         << " seconds." << endl;
    return 0;
}

/**
 * The checksumCuda handler that initialises the arrays to be used and calls
 * the checksum kernel. It also computes the missing values not calculated
 * on the GPU. It then adds all values together and prints the checksum.
 */
unsigned int checksumCuda (int n, unsigned int* data_in) {
    int threadBlockSize = 512;
    // Round up so the final partial block is still launched; the old
    // n/threadBlockSize truncated, dropping tail elements and launching
    // zero blocks whenever n < threadBlockSize.
    int numBlocks = (n + threadBlockSize - 1) / threadBlockSize;

    /* allocate the vectors on the GPU */
    unsigned int* deviceDataIn = NULL;
    checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(unsigned int)));
    if (deviceDataIn == NULL) {
        cout << "could not allocate memory!" << endl;
        exit(1);
    }

    timer kernelTime = timer("kernelTime");
    timer memoryTime = timer("memoryTime");

    /* copy the original vectors to the GPU */
    memoryTime.start();
    checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(unsigned int), cudaMemcpyHostToDevice));
    memoryTime.stop();

    kernelTime.start();
    checksumKernel<<<numBlocks, threadBlockSize>>>(deviceDataIn);
    cudaDeviceSynchronize();
    kernelTime.stop();

    // Copies back the correct data
    checkCudaCall(cudaMemcpy(data_in, deviceDataIn, n*sizeof(unsigned int), cudaMemcpyDeviceToHost));

    /* check whether the kernel invocation was successful */
    checkCudaCall(cudaGetLastError());

    /* free device memory */
    checkCudaCall(cudaFree(deviceDataIn));

    /* The times are printed */
    cout << fixed << setprecision(6);
    cout << "Kernel: \t\t" << kernelTime.getElapsed() << " seconds." << endl;
    cout << "Memory: \t\t" << memoryTime.getElapsed() << " seconds." << endl;
    return 0;
}

int main(int argc, char* argv[]) {
    int n;
    int seq;
    char *fileName;

    // Arg parse
    if (argc == 3) {
        fileName = (char*)argv[2];
        seq = atoi(argv[1]);
        printf("Chosen for option: %d\n", seq);
        printf("opening file %s\n", fileName);
    } else {
        printf("non valid options\n");
        return EXIT_FAILURE;
    }

    n = fileSize(fileName);
    if (n == -1) {
        printf("file not found\n");
        return EXIT_FAILURE;   // was exit(0): a missing file must not report success
    }

    char* data_in = new char[n];
    readData(fileName, data_in);

    // Widen bytes to unsigned ints. Cast through unsigned char so bytes
    // >= 0x80 do not sign-extend into huge values on platforms where
    // plain char is signed.
    unsigned int *data_in_raw = new unsigned int[n];
    for (int i = 0; i < n; i++){
        data_in_raw[i] = (unsigned char)data_in[i];
    }

    /* Check the option to determine the functions to be called */
    if (seq == 1){
        // Only sequential checksum is run
        unsigned int checksum = checksumSeq(n, data_in_raw);
        printf("Sequental checksum %u\n", checksum);
    } else if (seq == 0) {
        // Only cuda checksum is run
        unsigned int checksum = checksumCuda(n, data_in_raw);
        printf("Cuda checksum %u\n", checksum);
    } else if (seq == 2){
        // Both the sequential and the cuda checksum are run
        unsigned int checksum = checksumCuda(n, data_in_raw);
        printf("Cuda checksum %u\n", checksum);
        checksum = checksumSeq(n, data_in_raw);
        printf("Sequental checksum %u\n", checksum);
    }

    delete[] data_in;
    delete[] data_in_raw;   // was leaked
    return 0;
}
f4a8a8d80dfd7450f64edb1751c7785d1ba38adf.hip
// !!! This is a file automatically generated by hipify!!! // this file implements CUDA functionality for ScalaLab. // it is compiled with the NVIDIA nvcc compiler // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <memory> #include <iostream> #include "CUDAOps_KernelOps.h" #include <hipfft.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <helper_cuda.h> // Thread block size #define BLOCK_SIZE 16 inline bool IsGPUCapableP2P(hipDeviceProp_t *pProp) { #ifdef _WIN32 return (bool)(pProp->tccDriver ? true : false); #else return (bool)(pProp->major >= 2); #endif } inline bool IsAppBuiltAs64() { #if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) return 1; #else return 0; #endif } // perform FFT using CUFFT library of CUDA void cudafft( hipfftReal * hinData, int NX, float *hOut ) { hipfftHandle plan; hipfftComplex *dfftdata; hipMalloc( (void **)&dfftdata, sizeof(hipfftComplex)*(NX/2+1)); if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Cuda error: Failed to allocate\n"); return; } /* Copy input float array to the dfftdata device array that consists of hipfftComplex data */ hipMemcpy(dfftdata, hinData, NX*sizeof(float), hipMemcpyHostToDevice); if (hipfftPlan1d(&plan, NX, HIPFFT_R2C, 1) != HIPFFT_SUCCESS) { fprintf(stderr, "CUFFT error: Plan creation failed"); return; } // use the CUFFT plan to transform the signal in place if (hipfftExecR2C(plan, (hipfftReal *) dfftdata, dfftdata ) != HIPFFT_SUCCESS) { fprintf(stderr, "CUFFT error: ExecC2C Forward failed"); return; } if (hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "Cuda error: Failed to synchronize \n"); return; } // copy computed results in device space to host hipMemcpy( hOut, dfftdata, NX*sizeof(float), hipMemcpyDeviceToHost); hipfftDestroy(plan); } // JNI function to call the C cudafft() routine JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cudafft (JNIEnv *env, jobject obj, jfloatArray inData, jint N, jfloatArray 
outRealImsFFT) { jfloat *data = env->GetFloatArrayElements(inData, 0); jfloat *outRealIms = env->GetFloatArrayElements(outRealImsFFT, 0); // perform the FFT cudafft( data, N, outRealIms ); env->ReleaseFloatArrayElements( inData, data, 0); env->ReleaseFloatArrayElements( outRealImsFFT, outRealIms, 0); } static int simple_sgemm(const float *h_A, int hA, int wA, const float *h_B, int wB, float *h_C) { int hB = wA; int hC = hA; int wC = wB; float alpha = 1.0f; float beta = 0.0f; float *d_A = 0; float *d_B = 0; float *d_C = 0; hipblasHandle_t handle; hipblasStatus_t status; /* Initialize CUBLAS */ status = hipblasCreate(&handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS initialization error\n"); return EXIT_FAILURE; } /* Allocate device memory for the matrices */ if (hipMalloc((void **)&d_A, hA * wA * sizeof(d_A[0])) != hipSuccess) { fprintf(stderr, "!!!! device memory allocation error (allocate A)\n"); return EXIT_FAILURE; } if (hipMalloc((void **)&d_B, hB * wB * sizeof(d_B[0])) != hipSuccess) { fprintf(stderr, "!!!! device memory allocation error (allocate B)\n"); return EXIT_FAILURE; } if (hipMalloc((void **)&d_C, hC * wC * sizeof(d_C[0])) != hipSuccess) { fprintf(stderr, "!!!! device memory allocation error (allocate C)\n"); return EXIT_FAILURE; } /* Initialize the device matrices with the host matrices */ hipMemcpy( d_A, h_A, hA*wA*sizeof(float), hipMemcpyHostToDevice); hipMemcpy( d_B, h_B, hB*wB*sizeof(float), hipMemcpyHostToDevice); int lda = hA; int ldb = hB; int ldc = hC; /* Performs operation using cublas */ status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, hA, wB, wA, &alpha, d_A, lda, d_B, ldb, &beta, d_C, ldc); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! kernel execution error.\n"); return EXIT_FAILURE; } hipMemcpy( h_C, d_C, hC*wC*sizeof(float), hipMemcpyDeviceToHost); /* Memory clean up */ if (hipFree(d_A) != hipSuccess) { fprintf(stderr, "!!!! 
memory free error (A)\n"); return EXIT_FAILURE; } if (hipFree(d_B) != hipSuccess) { fprintf(stderr, "!!!! memory free error (B)\n"); return EXIT_FAILURE; } if (hipFree(d_C) != hipSuccess) { fprintf(stderr, "!!!! memory free error (C)\n"); return EXIT_FAILURE; } /* Shutdown */ status = hipblasDestroy(handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! shutdown error (A)\n"); return EXIT_FAILURE; } return 1; } static int simple_dgemm(const double *h_A, int hA, int wA, const double *h_B, int wB, double *h_C) { int hB = wA; int hC = hA; int wC = wB; double alpha = 1.0; double beta = 0.0; double *d_A = 0; double *d_B = 0; double *d_C = 0; hipblasHandle_t handle; hipblasStatus_t status; /* Initialize CUBLAS */ status = hipblasCreate(&handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS initialization error\n"); return EXIT_FAILURE; } /* Allocate device memory for the matrices */ if (hipMalloc((void **)&d_A, hA * wA * sizeof(d_A[0])) != hipSuccess) { fprintf(stderr, "!!!! device memory allocation error (allocate A)\n"); return EXIT_FAILURE; } if (hipMalloc((void **)&d_B, hB * wB * sizeof(d_B[0])) != hipSuccess) { fprintf(stderr, "!!!! device memory allocation error (allocate B)\n"); return EXIT_FAILURE; } if (hipMalloc((void **)&d_C, hC * wC * sizeof(d_C[0])) != hipSuccess) { fprintf(stderr, "!!!! device memory allocation error (allocate C)\n"); return EXIT_FAILURE; } /* Initialize the device matrices with the host matrices */ hipMemcpy( d_A, h_A, hA*wA*sizeof(double), hipMemcpyHostToDevice); hipMemcpy( d_B, h_B, hB*wB*sizeof(double), hipMemcpyHostToDevice); int lda = hA; int ldb = hB; int ldc = hC; /* Performs operation using cublas */ status = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, hA, wB, wA, &alpha, d_A, lda, d_B, ldb, &beta, d_C, ldc); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
kernel execution error.\n"); return EXIT_FAILURE; } hipMemcpy( h_C, d_C, hC*wC*sizeof(double), hipMemcpyDeviceToHost); /* Memory clean up */ if (hipFree(d_A) != hipSuccess) { fprintf(stderr, "!!!! memory free error (A)\n"); return EXIT_FAILURE; } if (hipFree(d_B) != hipSuccess) { fprintf(stderr, "!!!! memory free error (B)\n"); return EXIT_FAILURE; } if (hipFree(d_C) != hipSuccess) { fprintf(stderr, "!!!! memory free error (C)\n"); return EXIT_FAILURE; } /* Shutdown */ status = hipblasDestroy(handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! shutdown error (A)\n"); return EXIT_FAILURE; } return 1; } // kernels for matrix addition/subtraction/multiply with scalar // however, due to tranfer delays, linear time operations like matrix addition / subtraction / multiply with scalar // runs usually faster in host CPU __global__ void add_matrix(float *a, float *b, float *c, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) c[idx] = a[idx] + b[idx]; } __global__ void dadd_matrix(double *a, double *b, double *c, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) c[idx] = a[idx] + b[idx]; } __global__ void subtract_matrix(float *a, float *b, float *c, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) c[idx] = a[idx] - b[idx]; } __global__ void dsubtract_matrix(double *a, double *b, double *c, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) c[idx] = a[idx] - b[idx]; } __global__ void dmul_Scalar_matrix(double *a, double value, double *c, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) c[idx] = a[idx]*value; } __global__ void mul_Scalar_matrix(float *a, float value, float *c, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) c[idx] = a[idx]*value; } void cuda_matrixAdd(float *a_h, float *b_h, float *c_h, int N) { float *a_d, *b_d, *c_d; size_t size = N * sizeof (float); // allocate memory in the GPU device for a, b and c 
hipMalloc((void **) & a_d, size); hipMalloc((void **) & b_d, size); hipMalloc((void **) & c_d, size); // copy from host to GPU device hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice); // do calculations on device int block_size = 256; int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1); hipLaunchKernelGGL(( add_matrix) , dim3(n_blocks), dim3(block_size) , 0, 0, a_d, b_d, c_d, N); // Retrieve results from the device hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost); // Cleanup hipFree(a_d); hipFree(b_d); hipFree(c_d); } void dcuda_matrixAdd(double *a_h, double *b_h, double *c_h, int N) { double *a_d, *b_d, *c_d; size_t size = N * sizeof (double); // allocate memory in the GPU device for a, b and c hipMalloc((void **) & a_d, size); hipMalloc((void **) & b_d, size); hipMalloc((void **) & c_d, size); // copy from host to GPU device hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice); // do calculations on device int block_size = 256; int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1); hipLaunchKernelGGL(( dadd_matrix) , dim3(n_blocks), dim3(block_size) , 0, 0, a_d, b_d, c_d, N); // Retrieve results from the device hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost); // Cleanup hipFree(a_d); hipFree(b_d); hipFree(c_d); } void cuda_matrixSubtract(float *a_h, float *b_h, float *c_h, int N) { float *a_d, *b_d, *c_d; size_t size = N * sizeof (float); // allocate memory in the GPU device for a, b and c hipMalloc((void **) & a_d, size); hipMalloc((void **) & b_d, size); hipMalloc((void **) & c_d, size); // copy from host to GPU device hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice); // do calculations on device int block_size = 256; int n_blocks = N / block_size + (N % block_size == 0 ? 
0 : 1); hipLaunchKernelGGL(( subtract_matrix) , dim3(n_blocks), dim3(block_size) , 0, 0, a_d, b_d, c_d, N); // Retrieve results from the device hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost); // Cleanup hipFree(a_d); hipFree(b_d); hipFree(c_d); } void dcuda_matrixSubtract(double *a_h, double *b_h, double *c_h, int N) { double *a_d, *b_d, *c_d; size_t size = N * sizeof (double); // allocate memory in the GPU device for a, b and c hipMalloc((void **) & a_d, size); hipMalloc((void **) & b_d, size); hipMalloc((void **) & c_d, size); // copy from host to GPU device hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice); // do calculations on device int block_size = 256; int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1); hipLaunchKernelGGL(( dsubtract_matrix) , dim3(n_blocks), dim3(block_size) , 0, 0, a_d, b_d, c_d, N); // Retrieve results from the device hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost); // Cleanup hipFree(a_d); hipFree(b_d); hipFree(c_d); } void cuda_matrixMulScalar(float *a_h, float scalarValue, float *c_h, int N) { float *a_d, *c_d; size_t size = N * sizeof (float); // allocate memory in the GPU device for a, b and c hipMalloc((void **) & a_d, size); hipMalloc((void **) & c_d, size); // copy from host to GPU device hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); // do calculations on device int block_size = 256; int n_blocks = N / block_size + (N % block_size == 0 ? 
0 : 1); hipLaunchKernelGGL(( mul_Scalar_matrix) , dim3(n_blocks), dim3(block_size) , 0, 0, a_d, scalarValue, c_d, N); // Retrieve results from the device hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost); // Cleanup hipFree(a_d); hipFree(c_d); } void dcuda_matrixMulScalar(double *a_h, double scalarValue, double *c_h, int N) { double *a_d, *c_d; size_t size = N * sizeof (double); // allocate memory in the GPU device for a, b and c hipMalloc((void **) & a_d, size); hipMalloc((void **) & c_d, size); // copy from host to GPU device hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); // do calculations on device int block_size = 256; int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1); hipLaunchKernelGGL(( dmul_Scalar_matrix) , dim3(n_blocks), dim3(block_size) , 0, 0, a_d, scalarValue, c_d, N); // Retrieve results from the device hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost); // Cleanup hipFree(a_d); hipFree(c_d); } // Device multiplication function called by Mul() // Compute C = A * B // hA is the height of A (i.e. # rows) // wA is the width of A (i.e. # columns) // wB is the width of B __global__ void Muld(float* A, float* B, int hA, int wA, int wC, float* C) { // each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0.0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row >= hA || col >= wC) return; for (int e=0; e<wA; ++e) Cvalue += (A[row*wA+e]) *(B[e*wC+col]); C[row*wC+col] = Cvalue; } // Device multiplication function called by Mul() // Compute C = A * B // hA is the height of A (i.e. # rows) // wA is the width of A (i.e. 
# columns) // wB is the width of B __global__ void dMuld(double* A, double* B, int hA, int wA, int wC, double* C) { // each thread computes one element of C // by accumulating results into Cvalue double Cvalue = 0.0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row >= hA || col >= wC) return; for (int e=0; e<wA; ++e) Cvalue += (A[row*wA+e]) *(B[e*wC+col]); C[row*wC+col] = Cvalue; } // Host multiplication function // Compute C = A * B // hA is the height of A (i.e. # rows) // wA is the width of A (i.e. # cols) // wB is the width of B void Mul(const float* A, const float* B, int hA, int wA, int wB, float* C) { int size; // Load A and B to the device float* Ad; size = hA * wA * sizeof(float); hipError_t err = hipMalloc((void**)&Ad, size); //printf("CUDA malloc A: %s \n", hipGetErrorString(err)); err = hipMemcpy(Ad, A, size, hipMemcpyHostToDevice); //printf("Copy A to device: %s \n", hipGetErrorString(err)); float* Bd; int hB = wA; // #rows of B == #columns of A size = hB * wB * sizeof(float); err = hipMalloc((void**)&Bd, size); // printf("CUDA malloc B: %s \n", hipGetErrorString(err)); err = hipMemcpy(Bd, B, size, hipMemcpyHostToDevice); //printf("Copy B to device: %s \n", hipGetErrorString(err)); // Allocate C on the device float* Cd; int hC = hA; // #rows of C == #rows of A int wC = wB; // #columns of C == #columns of B size = hC * wC * sizeof(float); err = hipMalloc((void**)&Cd, size); // printf("CUDA malloc C: %s \n", hipGetErrorString(err)); // Compute the execution configuration assuming // the matrix dimensions are multiples of BLOCK_SIZE /******************** calculates the execution configuration effectively the kernel function <Muld> will be executed concurrently by BLOCK_SIZE^2 GPU threads ************************/ dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((wB + dimBlock.x-1)/dimBlock.x, (hA+dimBlock.y-1) / dimBlock.y); // Launch the device computation hipLaunchKernelGGL(( Muld), 
dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd, hA, wA, wC, Cd); err = hipDeviceSynchronize(); //printf("Run kernel: %s \n", hipGetErrorString(err)); // Read C from the device err = hipMemcpy(C, Cd, size, hipMemcpyDeviceToHost); // printf("Copy C off the device: %s \n", hipGetErrorString(err)); // Free device memory hipFree(Ad); hipFree(Bd); hipFree(Cd); } // Host multiplication function // Compute C = A * B // hA is the height of A (i.e. # rows) // wA is the width of A (i.e. # cols) // wB is the width of B void dMul(const double* A, const double* B, int hA, int wA, int wB, double* C) { int size; // Load A and B to the device double* Ad; size = hA * wA * sizeof(double); hipError_t err = hipMalloc((void**)&Ad, size); //printf("CUDA malloc A: %s \n", hipGetErrorString(err)); err = hipMemcpy(Ad, A, size, hipMemcpyHostToDevice); //printf("Copy A to device: %s \n", hipGetErrorString(err)); double* Bd; int hB = wA; // #rows of B == #columns of A size = hB * wB * sizeof(double); err = hipMalloc((void**)&Bd, size); // printf("CUDA malloc B: %s \n", hipGetErrorString(err)); err = hipMemcpy(Bd, B, size, hipMemcpyHostToDevice); //printf("Copy B to device: %s \n", hipGetErrorString(err)); // Allocate C on the device double* Cd; int hC = hA; // #rows of C == #rows of A int wC = wB; // #columns of C == #columns of B size = hC * wC * sizeof(double); err = hipMalloc((void**)&Cd, size); // printf("CUDA malloc C: %s \n", hipGetErrorString(err)); // Compute the execution configuration assuming // the matrix dimensions are multiples of BLOCK_SIZE /******************** calculates the execution configuration effectively the kernel function <Muld> will be executed concurrently by BLOCK_SIZE^2 GPU threads ************************/ dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((wB + dimBlock.x-1)/dimBlock.x, (hA+dimBlock.y-1) / dimBlock.y); // Launch the device computation hipLaunchKernelGGL(( dMuld), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd, hA, wA, wC, Cd); err = 
hipDeviceSynchronize(); //printf("Run kernel: %s \n", hipGetErrorString(err)); // Read C from the device err = hipMemcpy(C, Cd, size, hipMemcpyDeviceToHost); // printf("Copy C off the device: %s \n", hipGetErrorString(err)); // Free device memory hipFree(Ad); hipFree(Bd); hipFree(Cd); } // add matrices extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cma(JNIEnv *env, jobject obj, jfloatArray aArray, jfloatArray bArray, jfloatArray cArray) { jfloat *a = env->GetFloatArrayElements( aArray, 0); jfloat *b = env->GetFloatArrayElements( bArray, 0); jfloat *c = env->GetFloatArrayElements( cArray, 0); jsize N = env->GetArrayLength( aArray); cuda_matrixAdd(a, b, c, N); env->ReleaseFloatArrayElements( aArray, a, 0); env->ReleaseFloatArrayElements( bArray, b, 0); env->ReleaseFloatArrayElements( cArray, c, 0); } // add matrices extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmad(JNIEnv *env, jobject obj, jdoubleArray aArray, jdoubleArray bArray, jdoubleArray cArray) { jdouble *a = env->GetDoubleArrayElements( aArray, 0); jdouble *b = env->GetDoubleArrayElements( bArray, 0); jdouble *c = env->GetDoubleArrayElements( cArray, 0); jsize N = env->GetArrayLength( aArray); dcuda_matrixAdd(a, b, c, N); env->ReleaseDoubleArrayElements( aArray, a, 0); env->ReleaseDoubleArrayElements( bArray, b, 0); env->ReleaseDoubleArrayElements( cArray, c, 0); } // multiply with a scalar extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmscalar(JNIEnv *env, jobject obj, jfloatArray aArray, jfloat value, jfloatArray cArray) { jfloat *a = env->GetFloatArrayElements( aArray, 0); jfloat *c = env->GetFloatArrayElements( cArray, 0); jsize N = env->GetArrayLength( aArray); cuda_matrixMulScalar(a, value, c, N); env->ReleaseFloatArrayElements( aArray, a, 0); env->ReleaseFloatArrayElements( cArray, c, 0); } // multiply with a scalar extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmscalard(JNIEnv *env, jobject obj, jdoubleArray aArray, jdouble value, jdoubleArray cArray) { 
jdouble *a = env->GetDoubleArrayElements( aArray, 0); jdouble *c = env->GetDoubleArrayElements( cArray, 0); jsize N = env->GetArrayLength( aArray); dcuda_matrixMulScalar(a, value, c, N); env->ReleaseDoubleArrayElements( aArray, a, 0); env->ReleaseDoubleArrayElements( cArray, c, 0); } // subtract matrices extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cms(JNIEnv *env, jobject obj, jfloatArray aArray, jfloatArray bArray, jfloatArray cArray) { jfloat *a = env->GetFloatArrayElements( aArray, 0); jfloat *b = env->GetFloatArrayElements( bArray, 0); jfloat *c = env->GetFloatArrayElements( cArray, 0); jsize N = env->GetArrayLength( aArray); cuda_matrixSubtract(a, b, c, N); env->ReleaseFloatArrayElements( aArray, a, 0); env->ReleaseFloatArrayElements( bArray, b, 0); env->ReleaseFloatArrayElements( cArray, c, 0); } // subtract matrices extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmsd(JNIEnv *env, jobject obj, jdoubleArray aArray, jdoubleArray bArray, jdoubleArray cArray) { jdouble *a = env->GetDoubleArrayElements( aArray, 0); jdouble *b = env->GetDoubleArrayElements( bArray, 0); jdouble *c = env->GetDoubleArrayElements( cArray, 0); jsize N = env->GetArrayLength( aArray); dcuda_matrixSubtract(a, b, c, N); env->ReleaseDoubleArrayElements( aArray, a, 0); env->ReleaseDoubleArrayElements( bArray, b, 0); env->ReleaseDoubleArrayElements( cArray, c, 0); } // multiply matrices extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmm(JNIEnv *env, jobject obj, jfloatArray aArray, jfloatArray bArray, jfloatArray cArray, jint hA, jint wA, jint wB) { jfloat *a = env->GetFloatArrayElements( aArray, 0); jfloat *b = env->GetFloatArrayElements( bArray, 0); jfloat *c = env->GetFloatArrayElements( cArray, 0); // call the C multiplication routine Mul(a, b, hA, wA, wB, c); env->ReleaseFloatArrayElements( aArray, a, 0); env->ReleaseFloatArrayElements( bArray, b, 0); env->ReleaseFloatArrayElements( cArray, c, 0); } // multiply matrices extern "C" JNIEXPORT void JNICALL 
Java_CUDAOps_KernelOps_cmmd(JNIEnv *env, jobject obj, jdoubleArray aArray, jdoubleArray bArray, jdoubleArray cArray, jint hA, jint wA, jint wB) { jdouble *a = env->GetDoubleArrayElements( aArray, 0); jdouble *b = env->GetDoubleArrayElements( bArray, 0); jdouble *c = env->GetDoubleArrayElements( cArray, 0); // call the C multiplication routine dMul(a, b, hA, wA, wB, c); env->ReleaseDoubleArrayElements( aArray, a, 0); env->ReleaseDoubleArrayElements( bArray, b, 0); env->ReleaseDoubleArrayElements( cArray, c, 0); } extern "C" JNIEXPORT jint JNICALL Java_CUDAOps_KernelOps_sgemm (JNIEnv *env, jobject obj, jfloatArray h_a, jint hA, jint wA, jfloatArray h_b, jint wB, jfloatArray h_c) { jfloat *ha = env->GetFloatArrayElements(h_a, 0); jfloat *hb = env->GetFloatArrayElements(h_b, 0); jfloat *hc = env->GetFloatArrayElements(h_c, 0); simple_sgemm(ha, hA, wA, hb, wB, hc); env->ReleaseFloatArrayElements( h_a, ha, 0); env->ReleaseFloatArrayElements( h_b, hb, 0); env->ReleaseFloatArrayElements( h_c, hc, 0); return 0; } extern "C" JNIEXPORT jint JNICALL Java_CUDAOps_KernelOps_dgemm (JNIEnv *env, jobject obj, jdoubleArray h_a, jint hA, jint wA, jdoubleArray h_b, jint wB, jdoubleArray h_c) { jdouble *ha = env->GetDoubleArrayElements(h_a, 0); jdouble *hb = env->GetDoubleArrayElements(h_b, 0); jdouble *hc = env->GetDoubleArrayElements(h_c, 0); simple_dgemm(ha, hA, wA, hb, wB, hc); env->ReleaseDoubleArrayElements( h_a, ha, 0); env->ReleaseDoubleArrayElements( h_b, hb, 0); env->ReleaseDoubleArrayElements( h_c, hc, 0); return 0; } extern "C" JNIEXPORT jstring JNICALL Java_CUDAOps_KernelOps_getCUDADeviceInfo (JNIEnv *env, jobject obj ) { FILE * pFile = fopen("temp.txt", "w"); jstring ret; char *s = "Fail"; fprintf(pFile, " CUDA Device Query (Runtime API) version (CUDART static linking)\n\n"); int deviceCount = 0; hipError_t error_id = hipGetDeviceCount(&deviceCount); if (error_id != hipSuccess) { fprintf(pFile,"hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, 
hipGetErrorString(error_id)); ret = env->NewStringUTF(s); return ret; } // This function call returns 0 if there are no CUDA capable devices. if (deviceCount == 0) { fprintf(pFile, "There are no available device(s) that support CUDA\n"); } else { fprintf(pFile, "Detected %d CUDA Capable device(s)\n", deviceCount); } int dev, driverVersion = 0, runtimeVersion = 0; for (dev = 0; dev < deviceCount; ++dev) { hipSetDevice(dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); fprintf(pFile, "\nDevice %d: \"%s\"\n", dev, deviceProp.name); // Console log hipDriverGetVersion(&driverVersion); hipRuntimeGetVersion(&runtimeVersion); fprintf(pFile, " CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10); fprintf(pFile, " CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); char msg[256]; sprintf(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem); fprintf(pFile, "%s", msg); fprintf(pFile, " (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); fprintf(pFile, " GPU Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f); #if CUDART_VERSION >= 5000 // This is supported in CUDA 5.0 (runtime API device properties) fprintf(pFile, " Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f); fprintf(pFile, " Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth); if (deviceProp.l2CacheSize) { fprintf(pFile, " L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize); } #else // This only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API) int memoryClock; 
hipDeviceGetAttribute(&memoryClock, hipDeviceAttributeMemoryClockRate, dev); fprintf(pFile, " Memory Clock rate: %.0f Mhz\n", memoryClock * 1e-3f); int memBusWidth; hipDeviceGetAttribute(&memBusWidth, hipDeviceAttributeMemoryBusWidth, dev); fprintf(pFile, " Memory Bus Width: %d-bit\n", memBusWidth); int L2CacheSize; hipDeviceGetAttribute(&L2CacheSize, hipDeviceAttributeL2CacheSize, dev); if (L2CacheSize) { fprintf(pFile, " L2 Cache Size: %d bytes\n", L2CacheSize); } #endif fprintf(pFile, " Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n", deviceProp.maxTexture1D , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]); fprintf(pFile, " Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]); fprintf(pFile, " Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n", deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]); fprintf(pFile, " Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); fprintf(pFile, " Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); fprintf(pFile, " Total number of registers available per block: %d\n", deviceProp.regsPerBlock); fprintf(pFile, " Warp size: %d\n", deviceProp.warpSize); fprintf(pFile, " Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); fprintf(pFile, " Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); fprintf(pFile, " Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); fprintf(pFile, " Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); fprintf(pFile, " Maximum 
memory pitch: %lu bytes\n", deviceProp.memPitch); fprintf(pFile, " Texture alignment: %lu bytes\n", deviceProp.textureAlignment); fprintf(pFile, " Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount); fprintf(pFile, " Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No"); fprintf(pFile, " Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No"); fprintf(pFile, " Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No"); fprintf(pFile, " Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No"); fprintf(pFile, " Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled"); #ifdef WIN32 fprintf(pFile, " CUDA Device Driver Mode (TCC or WDDM): %s\n", deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)"); #endif fprintf(pFile, " Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? 
"Yes" : "No"); fprintf(pFile, " Device PCI Bus ID / PCI location ID: %d / %d\n", deviceProp.pciBusID, deviceProp.pciDeviceID); const char *sComputeMode[] = { "Default (multiple host threads can use ::hipSetDevice() with device simultaneously)", "Exclusive (only one host thread in one process is able to use ::hipSetDevice() with this device)", "Prohibited (no host thread can use ::hipSetDevice() with this device)", "Exclusive Process (many threads in one process is able to use ::hipSetDevice() with this device)", "Unknown", NULL }; fprintf(pFile, " Compute Mode:\n"); fprintf(pFile, " < %s >\n", sComputeMode[deviceProp.computeMode]); } // If there are 2 or more GPUs, query to determine whether RDMA is supported if (deviceCount >= 2) { hipDeviceProp_t prop[64]; int gpuid[64]; // we want to find the first two GPU's that can support P2P int gpu_p2p_count = 0; for (int i=0; i < deviceCount; i++) { checkCudaErrors(hipGetDeviceProperties(&prop[i], i)); // Only boards based on Fermi or later can support P2P if ((prop[i].major >= 2) #ifdef _WIN32 // on Windows (64-bit), the Tesla Compute Cluster driver for windows must be enabled to supprot this && prop[i].tccDriver #endif ) { // This is an array of P2P capable GPUs gpuid[gpu_p2p_count++] = i; } } // Show all the combinations of support P2P GPUs int can_access_peer_0_1, can_access_peer_1_0; if (gpu_p2p_count >= 2) { for (int i = 0; i < gpu_p2p_count-1; i++) { for (int j = 1; j < gpu_p2p_count; j++) { checkCudaErrors(hipDeviceCanAccessPeer(&can_access_peer_0_1, gpuid[i], gpuid[j])); fprintf(pFile, "> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[gpuid[i]].name, gpuid[i], prop[gpuid[j]].name, gpuid[j] , can_access_peer_0_1 ? 
"Yes" : "No"); } } for (int j = 1; j < gpu_p2p_count; j++) { for (int i = 0; i < gpu_p2p_count-1; i++) { checkCudaErrors(hipDeviceCanAccessPeer(&can_access_peer_1_0, gpuid[j], gpuid[i])); fprintf(pFile, "> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[gpuid[j]].name, gpuid[j], prop[gpuid[i]].name, gpuid[i] , can_access_peer_1_0 ? "Yes" : "No"); } } } } // csv masterlog info // ***************************** // exe and CUDA driver name fprintf(pFile, "\n"); std::string sProfileString = "deviceQuery, CUDA Driver = CUDART"; char cTemp[16]; // driver version sProfileString += ", CUDA Driver Version = "; #ifdef WIN32 sprintf_s(cTemp, 10, "%d.%d", driverVersion/1000, (driverVersion%100)/10); #else sprintf(cTemp, "%d.%d", driverVersion/1000, (driverVersion%100)/10); #endif sProfileString += cTemp; // Runtime version sProfileString += ", CUDA Runtime Version = "; #ifdef WIN32 sprintf_s(cTemp, 10, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10); #else sprintf(cTemp, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10); #endif sProfileString += cTemp; // Device count sProfileString += ", NumDevs = "; #ifdef WIN32 sprintf_s(cTemp, 10, "%d", deviceCount); #else sprintf(cTemp, "%d", deviceCount); #endif sProfileString += cTemp; // Print Out all device Names for (dev = 0; dev < deviceCount; ++dev) { #ifdef _WIN32 sprintf_s(cTemp, 13, ", Device%d = ", dev); #else sprintf(cTemp, ", Device%d = ", dev); #endif hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); sProfileString += cTemp; sProfileString += deviceProp.name; } sProfileString += "\n"; fprintf(pFile, "%s", sProfileString.c_str()); fclose(pFile); // read back the text contents of the file long f_size; char* code; size_t code_s, result; FILE* fp = fopen("temp.txt", "r"); fseek(fp, 0, SEEK_END); f_size = ftell(fp); /* This returns 29696, but file is 85 bytes */ fseek(fp, 0, SEEK_SET); code_s = sizeof(char) * f_size; code = (char *)malloc(code_s); result = fread(code, 1, f_size, fp); ret = 
env->NewStringUTF(code); return ret; }
f4a8a8d80dfd7450f64edb1751c7785d1ba38adf.cu
// this file implements CUDA functionality for ScalaLab.
// it is compiled with the NVIDIA nvcc compiler
// NOTE(review): this block was stored with its newlines collapsed, which made
// every `//` comment swallow the code after it and broke the preprocessor
// directives; the original line structure is restored here.

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <memory>
#include <iostream>

#include "CUDAOps_KernelOps.h"

#include <cufft.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <helper_cuda.h>

// Thread block size (for the Muld / dMuld tiled launches)
#define BLOCK_SIZE 16

// True if the device can take part in peer-to-peer transfers
// (TCC driver on Windows, compute capability >= 2.0 elsewhere).
inline bool IsGPUCapableP2P(cudaDeviceProp *pProp)
{
#ifdef _WIN32
    return (bool)(pProp->tccDriver ? true : false);
#else
    return (bool)(pProp->major >= 2);
#endif
}

// True when this library was compiled as a 64-bit binary.
inline bool IsAppBuiltAs64()
{
#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
    return 1;
#else
    return 0;
#endif
}

// perform FFT using CUFFT library of CUDA.
// hinData: NX real input samples (host); hOut: receives NX floats, i.e. the
// first NX/2 interleaved (re, im) pairs of the R2C transform (host).
// On any CUFFT/CUDA failure an error is printed and the function returns early.
void cudafft(cufftReal *hinData, int NX, float *hOut)
{
    cufftHandle plan;
    cufftComplex *dfftdata;
    cudaMalloc((void **)&dfftdata, sizeof(cufftComplex)*(NX/2+1));
    if (cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "Cuda error: Failed to allocate\n");
        return;
    }

    /* Copy input float array to the dfftdata device array that consists of cufftComplex data */
    cudaMemcpy(dfftdata, hinData, NX*sizeof(float), cudaMemcpyHostToDevice);

    if (cufftPlan1d(&plan, NX, CUFFT_R2C, 1) != CUFFT_SUCCESS) {
        fprintf(stderr, "CUFFT error: Plan creation failed");
        cudaFree(dfftdata);    // fix: do not leak the device buffer on error
        return;
    }

    // use the CUFFT plan to transform the signal in place
    if (cufftExecR2C(plan, (cufftReal *)dfftdata, dfftdata) != CUFFT_SUCCESS) {
        fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
        cufftDestroy(plan);
        cudaFree(dfftdata);
        return;
    }
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // drop-in replacement
    if (cudaDeviceSynchronize() != cudaSuccess) {
        fprintf(stderr, "Cuda error: Failed to synchronize \n");
        cufftDestroy(plan);
        cudaFree(dfftdata);
        return;
    }

    // copy computed results in device space to host
    cudaMemcpy(hOut, dfftdata, NX*sizeof(float), cudaMemcpyDeviceToHost);

    cufftDestroy(plan);
    cudaFree(dfftdata);        // fix: original leaked dfftdata on the success path
}

// JNI function to call the C cudafft() routine
JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cudafft
  (JNIEnv *env, jobject obj, jfloatArray inData, jint N, jfloatArray outRealImsFFT)
{
    jfloat *data = env->GetFloatArrayElements(inData, 0);
    jfloat *outRealIms = env->GetFloatArrayElements(outRealImsFFT, 0);

    // perform the FFT
    cudafft(data, N, outRealIms);

    env->ReleaseFloatArrayElements(inData, data, 0);
    env->ReleaseFloatArrayElements(outRealImsFFT, outRealIms, 0);
}

// C = A * B in single precision via cuBLAS SGEMM.
// hA/wA: rows/cols of A; wB: cols of B (rows of B == wA).
// Returns 1 on success, EXIT_FAILURE on any CUDA/cuBLAS error.
// NOTE(review): on the error paths below, earlier device allocations and the
// cuBLAS handle are leaked; kept as-is to preserve the original control flow.
static int simple_sgemm(const float *h_A, int hA, int wA, const float *h_B, int wB, float *h_C)
{
    int hB = wA;
    int hC = hA;
    int wC = wB;
    float alpha = 1.0f;
    float beta = 0.0f;
    float *d_A = 0;
    float *d_B = 0;
    float *d_C = 0;
    cublasHandle_t handle;
    cublasStatus_t status;

    /* Initialize CUBLAS */
    status = cublasCreate(&handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }

    /* Allocate device memory for the matrices */
    if (cudaMalloc((void **)&d_A, hA * wA * sizeof(d_A[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc((void **)&d_B, hB * wB * sizeof(d_B[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate B)\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc((void **)&d_C, hC * wC * sizeof(d_C[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate C)\n");
        return EXIT_FAILURE;
    }

    /* Initialize the device matrices with the host matrices */
    cudaMemcpy(d_A, h_A, hA*wA*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, hB*wB*sizeof(float), cudaMemcpyHostToDevice);

    // cuBLAS is column-major; leading dimensions are the row counts
    int lda = hA;
    int ldb = hB;
    int ldc = hC;

    /* Performs operation using cublas */
    status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, hA, wB, wA,
                         &alpha, d_A, lda, d_B, ldb, &beta, d_C, ldc);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! kernel execution error.\n");
        return EXIT_FAILURE;
    }

    cudaMemcpy(h_C, d_C, hC*wC*sizeof(float), cudaMemcpyDeviceToHost);

    /* Memory clean up */
    if (cudaFree(d_A) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (A)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_B) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (B)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_C) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (C)\n");
        return EXIT_FAILURE;
    }

    /* Shutdown */
    status = cublasDestroy(handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return EXIT_FAILURE;
    }
    return 1;
}

// C = A * B in double precision via cuBLAS DGEMM.
// Same contract and caveats as simple_sgemm above.
static int simple_dgemm(const double *h_A, int hA, int wA, const double *h_B, int wB, double *h_C)
{
    int hB = wA;
    int hC = hA;
    int wC = wB;
    double alpha = 1.0;
    double beta = 0.0;
    double *d_A = 0;
    double *d_B = 0;
    double *d_C = 0;
    cublasHandle_t handle;
    cublasStatus_t status;

    /* Initialize CUBLAS */
    status = cublasCreate(&handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }

    /* Allocate device memory for the matrices */
    if (cudaMalloc((void **)&d_A, hA * wA * sizeof(d_A[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc((void **)&d_B, hB * wB * sizeof(d_B[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate B)\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc((void **)&d_C, hC * wC * sizeof(d_C[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate C)\n");
        return EXIT_FAILURE;
    }

    /* Initialize the device matrices with the host matrices */
    cudaMemcpy(d_A, h_A, hA*wA*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, hB*wB*sizeof(double), cudaMemcpyHostToDevice);

    int lda = hA;
    int ldb = hB;
    int ldc = hC;

    /* Performs operation using cublas */
    status = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, hA, wB, wA,
                         &alpha, d_A, lda, d_B, ldb, &beta, d_C, ldc);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! kernel execution error.\n");
        return EXIT_FAILURE;
    }

    cudaMemcpy(h_C, d_C, hC*wC*sizeof(double), cudaMemcpyDeviceToHost);

    /* Memory clean up */
    if (cudaFree(d_A) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (A)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_B) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (B)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_C) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (C)\n");
        return EXIT_FAILURE;
    }

    /* Shutdown */
    status = cublasDestroy(handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return EXIT_FAILURE;
    }
    return 1;
}

// kernels for matrix addition/subtraction/multiply with scalar.
// however, due to transfer delays, linear time operations like matrix
// addition / subtraction / multiply with scalar usually run faster in the
// host CPU.

__global__ void add_matrix(float *a, float *b, float *c, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) c[idx] = a[idx] + b[idx];
}

__global__ void dadd_matrix(double *a, double *b, double *c, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) c[idx] = a[idx] + b[idx];
}

__global__ void subtract_matrix(float *a, float *b, float *c, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) c[idx] = a[idx] - b[idx];
}

__global__ void dsubtract_matrix(double *a, double *b, double *c, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) c[idx] = a[idx] - b[idx];
}

__global__ void dmul_Scalar_matrix(double *a, double value, double *c, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) c[idx] = a[idx]*value;
}

__global__ void mul_Scalar_matrix(float *a, float value, float *c, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) c[idx] = a[idx]*value;
}

// c_h = a_h + b_h element-wise on the GPU (N floats, all host pointers).
void cuda_matrixAdd(float *a_h, float *b_h, float *c_h, int N)
{
    float *a_d, *b_d, *c_d;
    size_t size = N * sizeof(float);

    // allocate memory in the GPU device for a, b and c
    cudaMalloc((void **)&a_d, size);
    cudaMalloc((void **)&b_d, size);
    cudaMalloc((void **)&c_d, size);

    // copy from host to GPU device
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);

    // do calculations on device (ceil-div for the grid size)
    int block_size = 256;
    int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);
    add_matrix<<<n_blocks, block_size>>>(a_d, b_d, c_d, N);

    // Retrieve results from the device
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    // Cleanup
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}

// c_h = a_h + b_h element-wise on the GPU (N doubles, all host pointers).
void dcuda_matrixAdd(double *a_h, double *b_h, double *c_h, int N)
{
    double *a_d, *b_d, *c_d;
    size_t size = N * sizeof(double);

    // allocate memory in the GPU device for a, b and c
    cudaMalloc((void **)&a_d, size);
    cudaMalloc((void **)&b_d, size);
    cudaMalloc((void **)&c_d, size);

    // copy from host to GPU device
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);

    // do calculations on device
    int block_size = 256;
    int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);
    dadd_matrix<<<n_blocks, block_size>>>(a_d, b_d, c_d, N);

    // Retrieve results from the device
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    // Cleanup
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}

// c_h = a_h - b_h element-wise on the GPU (N floats).
void cuda_matrixSubtract(float *a_h, float *b_h, float *c_h, int N)
{
    float *a_d, *b_d, *c_d;
    size_t size = N * sizeof(float);

    // allocate memory in the GPU device for a, b and c
    cudaMalloc((void **)&a_d, size);
    cudaMalloc((void **)&b_d, size);
    cudaMalloc((void **)&c_d, size);

    // copy from host to GPU device
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);

    // do calculations on device
    int block_size = 256;
    int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);
    subtract_matrix<<<n_blocks, block_size>>>(a_d, b_d, c_d, N);

    // Retrieve results from the device
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    // Cleanup
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}

// c_h = a_h - b_h element-wise on the GPU (N doubles).
void dcuda_matrixSubtract(double *a_h, double *b_h, double *c_h, int N)
{
    double *a_d, *b_d, *c_d;
    size_t size = N * sizeof(double);

    // allocate memory in the GPU device for a, b and c
    cudaMalloc((void **)&a_d, size);
    cudaMalloc((void **)&b_d, size);
    cudaMalloc((void **)&c_d, size);

    // copy from host to GPU device
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);

    // do calculations on device
    int block_size = 256;
    int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);
    dsubtract_matrix<<<n_blocks, block_size>>>(a_d, b_d, c_d, N);

    // Retrieve results from the device
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    // Cleanup
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}

// c_h = a_h * scalarValue element-wise on the GPU (N floats).
void cuda_matrixMulScalar(float *a_h, float scalarValue, float *c_h, int N)
{
    float *a_d, *c_d;
    size_t size = N * sizeof(float);

    // allocate memory in the GPU device for a and c
    cudaMalloc((void **)&a_d, size);
    cudaMalloc((void **)&c_d, size);

    // copy from host to GPU device
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);

    // do calculations on device
    int block_size = 256;
    int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);
    mul_Scalar_matrix<<<n_blocks, block_size>>>(a_d, scalarValue, c_d, N);

    // Retrieve results from the device
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    // Cleanup
    cudaFree(a_d);
    cudaFree(c_d);
}

// c_h = a_h * scalarValue element-wise on the GPU (N doubles).
void dcuda_matrixMulScalar(double *a_h, double scalarValue, double *c_h, int N)
{
    double *a_d, *c_d;
    size_t size = N * sizeof(double);

    // allocate memory in the GPU device for a and c
    cudaMalloc((void **)&a_d, size);
    cudaMalloc((void **)&c_d, size);

    // copy from host to GPU device
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);

    // do calculations on device
    int block_size = 256;
    int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);
    dmul_Scalar_matrix<<<n_blocks, block_size>>>(a_d, scalarValue, c_d, N);

    // Retrieve results from the device
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    // Cleanup
    cudaFree(a_d);
    cudaFree(c_d);
}

// Device multiplication function called by Mul()
// Compute C = A * B (row-major)
// hA is the height of A (i.e. # rows)
// wA is the width of A (i.e. # columns)
// wC is the width of C (== width of B)
__global__ void Muld(float* A, float* B, int hA, int wA, int wC, float* C)
{
    // each thread computes one element of C by accumulating results into Cvalue
    float Cvalue = 0.0f;   // fix: float literal in a float kernel
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= hA || col >= wC) return;
    for (int e = 0; e < wA; ++e)
        Cvalue += (A[row*wA+e]) * (B[e*wC+col]);
    C[row*wC+col] = Cvalue;
}

// Device multiplication function called by dMul()
// Compute C = A * B (row-major), double precision
// hA is the height of A (i.e. # rows)
// wA is the width of A (i.e. # columns)
// wC is the width of C (== width of B)
__global__ void dMuld(double* A, double* B, int hA, int wA, int wC, double* C)
{
    // each thread computes one element of C by accumulating results into Cvalue
    double Cvalue = 0.0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= hA || col >= wC) return;
    for (int e = 0; e < wA; ++e)
        Cvalue += (A[row*wA+e]) * (B[e*wC+col]);
    C[row*wC+col] = Cvalue;
}

// Host multiplication function
// Compute C = A * B
// hA is the height of A (i.e. # rows)
// wA is the width of A (i.e. # cols)
// wB is the width of B
void Mul(const float* A, const float* B, int hA, int wA, int wB, float* C)
{
    int size;

    // Load A and B to the device
    float* Ad;
    size = hA * wA * sizeof(float);
    cudaError_t err = cudaMalloc((void**)&Ad, size);
    //printf("CUDA malloc A: %s \n", cudaGetErrorString(err));
    err = cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
    //printf("Copy A to device: %s \n", cudaGetErrorString(err));

    float* Bd;
    int hB = wA;   // #rows of B == #columns of A
    size = hB * wB * sizeof(float);
    err = cudaMalloc((void**)&Bd, size);
    //printf("CUDA malloc B: %s \n", cudaGetErrorString(err));
    err = cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
    //printf("Copy B to device: %s \n", cudaGetErrorString(err));

    // Allocate C on the device
    float* Cd;
    int hC = hA;   // #rows of C == #rows of A
    int wC = wB;   // #columns of C == #columns of B
    size = hC * wC * sizeof(float);
    err = cudaMalloc((void**)&Cd, size);
    //printf("CUDA malloc C: %s \n", cudaGetErrorString(err));

    // Compute the execution configuration: one thread per C element,
    // ceil-div so dimensions need NOT be multiples of BLOCK_SIZE
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((wB + dimBlock.x-1)/dimBlock.x, (hA + dimBlock.y-1)/dimBlock.y);

    // Launch the device computation
    Muld<<<dimGrid, dimBlock>>>(Ad, Bd, hA, wA, wC, Cd);
    // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize()
    err = cudaDeviceSynchronize();
    //printf("Run kernel: %s \n", cudaGetErrorString(err));

    // Read C from the device
    err = cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
    //printf("Copy C off the device: %s \n", cudaGetErrorString(err));

    // Free device memory
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
}

// Host multiplication function, double precision
// Compute C = A * B
// hA is the height of A (i.e. # rows)
// wA is the width of A (i.e. # cols)
// wB is the width of B
void dMul(const double* A, const double* B, int hA, int wA, int wB, double* C)
{
    int size;

    // Load A and B to the device
    double* Ad;
    size = hA * wA * sizeof(double);
    cudaError_t err = cudaMalloc((void**)&Ad, size);
    //printf("CUDA malloc A: %s \n", cudaGetErrorString(err));
    err = cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
    //printf("Copy A to device: %s \n", cudaGetErrorString(err));

    double* Bd;
    int hB = wA;   // #rows of B == #columns of A
    size = hB * wB * sizeof(double);
    err = cudaMalloc((void**)&Bd, size);
    //printf("CUDA malloc B: %s \n", cudaGetErrorString(err));
    err = cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
    //printf("Copy B to device: %s \n", cudaGetErrorString(err));

    // Allocate C on the device
    double* Cd;
    int hC = hA;   // #rows of C == #rows of A
    int wC = wB;   // #columns of C == #columns of B
    size = hC * wC * sizeof(double);
    err = cudaMalloc((void**)&Cd, size);
    //printf("CUDA malloc C: %s \n", cudaGetErrorString(err));

    // Compute the execution configuration (ceil-div, see Mul above)
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((wB + dimBlock.x-1)/dimBlock.x, (hA + dimBlock.y-1)/dimBlock.y);

    // Launch the device computation
    dMuld<<<dimGrid, dimBlock>>>(Ad, Bd, hA, wA, wC, Cd);
    err = cudaDeviceSynchronize();
    //printf("Run kernel: %s \n", cudaGetErrorString(err));

    // Read C from the device
    err = cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
    //printf("Copy C off the device: %s \n", cudaGetErrorString(err));

    // Free device memory
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
}

// add matrices
extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cma(JNIEnv *env, jobject obj,
        jfloatArray aArray, jfloatArray bArray, jfloatArray cArray)
{
    jfloat *a = env->GetFloatArrayElements(aArray, 0);
    jfloat *b = env->GetFloatArrayElements(bArray, 0);
    jfloat *c = env->GetFloatArrayElements(cArray, 0);
    jsize N = env->GetArrayLength(aArray);

    cuda_matrixAdd(a, b, c, N);

    env->ReleaseFloatArrayElements(aArray, a, 0);
    env->ReleaseFloatArrayElements(bArray, b, 0);
    env->ReleaseFloatArrayElements(cArray, c, 0);
}

// add matrices
extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmad(JNIEnv *env, jobject obj,
        jdoubleArray aArray, jdoubleArray bArray, jdoubleArray cArray)
{
    jdouble *a = env->GetDoubleArrayElements(aArray, 0);
    jdouble *b = env->GetDoubleArrayElements(bArray, 0);
    jdouble *c = env->GetDoubleArrayElements(cArray, 0);
    jsize N = env->GetArrayLength(aArray);

    dcuda_matrixAdd(a, b, c, N);

    env->ReleaseDoubleArrayElements(aArray, a, 0);
    env->ReleaseDoubleArrayElements(bArray, b, 0);
    env->ReleaseDoubleArrayElements(cArray, c, 0);
}

// multiply with a scalar
extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmscalar(JNIEnv *env, jobject obj,
        jfloatArray aArray, jfloat value, jfloatArray cArray)
{
    jfloat *a = env->GetFloatArrayElements(aArray, 0);
    jfloat *c = env->GetFloatArrayElements(cArray, 0);
    jsize N = env->GetArrayLength(aArray);

    cuda_matrixMulScalar(a, value, c, N);

    env->ReleaseFloatArrayElements(aArray, a, 0);
    env->ReleaseFloatArrayElements(cArray, c, 0);
}

// multiply with a scalar
extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmscalard(JNIEnv *env, jobject obj,
        jdoubleArray aArray, jdouble value, jdoubleArray cArray)
{
    jdouble *a = env->GetDoubleArrayElements(aArray, 0);
    jdouble *c = env->GetDoubleArrayElements(cArray, 0);
    jsize N = env->GetArrayLength(aArray);

    dcuda_matrixMulScalar(a, value, c, N);

    env->ReleaseDoubleArrayElements(aArray, a, 0);
    env->ReleaseDoubleArrayElements(cArray, c, 0);
}

// subtract matrices
extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cms(JNIEnv *env, jobject obj,
        jfloatArray aArray, jfloatArray bArray, jfloatArray cArray)
{
    jfloat *a = env->GetFloatArrayElements(aArray, 0);
    jfloat *b = env->GetFloatArrayElements(bArray, 0);
    jfloat *c = env->GetFloatArrayElements(cArray, 0);
    jsize N = env->GetArrayLength(aArray);

    cuda_matrixSubtract(a, b, c, N);

    env->ReleaseFloatArrayElements(aArray, a, 0);
    env->ReleaseFloatArrayElements(bArray, b, 0);
    env->ReleaseFloatArrayElements(cArray, c, 0);
}

// subtract matrices
extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmsd(JNIEnv *env, jobject obj,
        jdoubleArray aArray, jdoubleArray bArray, jdoubleArray cArray)
{
    jdouble *a = env->GetDoubleArrayElements(aArray, 0);
    jdouble *b = env->GetDoubleArrayElements(bArray, 0);
    jdouble *c = env->GetDoubleArrayElements(cArray, 0);
    jsize N = env->GetArrayLength(aArray);

    dcuda_matrixSubtract(a, b, c, N);

    env->ReleaseDoubleArrayElements(aArray, a, 0);
    env->ReleaseDoubleArrayElements(bArray, b, 0);
    env->ReleaseDoubleArrayElements(cArray, c, 0);
}

// multiply matrices
extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmm(JNIEnv *env, jobject obj,
        jfloatArray aArray, jfloatArray bArray, jfloatArray cArray, jint hA, jint wA, jint wB)
{
    jfloat *a = env->GetFloatArrayElements(aArray, 0);
    jfloat *b = env->GetFloatArrayElements(bArray, 0);
    jfloat *c = env->GetFloatArrayElements(cArray, 0);

    // call the C multiplication routine
    Mul(a, b, hA, wA, wB, c);

    env->ReleaseFloatArrayElements(aArray, a, 0);
    env->ReleaseFloatArrayElements(bArray, b, 0);
    env->ReleaseFloatArrayElements(cArray, c, 0);
}

// multiply matrices
extern "C" JNIEXPORT void JNICALL Java_CUDAOps_KernelOps_cmmd(JNIEnv *env, jobject obj,
        jdoubleArray aArray, jdoubleArray bArray, jdoubleArray cArray, jint hA, jint wA, jint wB)
{
    jdouble *a = env->GetDoubleArrayElements(aArray, 0);
    jdouble *b = env->GetDoubleArrayElements(bArray, 0);
    jdouble *c = env->GetDoubleArrayElements(cArray, 0);

    // call the C multiplication routine
    dMul(a, b, hA, wA, wB, c);

    env->ReleaseDoubleArrayElements(aArray, a, 0);
    env->ReleaseDoubleArrayElements(bArray, b, 0);
    env->ReleaseDoubleArrayElements(cArray, c, 0);
}

extern "C" JNIEXPORT jint JNICALL Java_CUDAOps_KernelOps_sgemm
  (JNIEnv *env, jobject obj, jfloatArray h_a, jint hA, jint wA, jfloatArray h_b, jint wB, jfloatArray h_c)
{
    jfloat *ha = env->GetFloatArrayElements(h_a, 0);
    jfloat *hb = env->GetFloatArrayElements(h_b, 0);
    jfloat *hc = env->GetFloatArrayElements(h_c, 0);

    simple_sgemm(ha, hA, wA, hb, wB, hc);

    env->ReleaseFloatArrayElements(h_a, ha, 0);
    env->ReleaseFloatArrayElements(h_b, hb, 0);
    env->ReleaseFloatArrayElements(h_c, hc, 0);
    return 0;
}

extern "C" JNIEXPORT jint JNICALL Java_CUDAOps_KernelOps_dgemm
  (JNIEnv *env, jobject obj, jdoubleArray h_a, jint hA, jint wA, jdoubleArray h_b, jint wB, jdoubleArray h_c)
{
    jdouble *ha = env->GetDoubleArrayElements(h_a, 0);
    jdouble *hb = env->GetDoubleArrayElements(h_b, 0);
    jdouble *hc = env->GetDoubleArrayElements(h_c, 0);

    simple_dgemm(ha, hA, wA, hb, wB, hc);

    env->ReleaseDoubleArrayElements(h_a, ha, 0);
    env->ReleaseDoubleArrayElements(h_b, hb, 0);
    env->ReleaseDoubleArrayElements(h_c, hc, 0);
    return 0;
}

// Writes a deviceQuery-style report to temp.txt, reads it back and returns
// it as a Java string ("Fail" if no CUDA device could be queried).
extern "C" JNIEXPORT jstring JNICALL Java_CUDAOps_KernelOps_getCUDADeviceInfo
  (JNIEnv *env, jobject obj)
{
    FILE *pFile = fopen("temp.txt", "w");
    jstring ret;
    const char *s = "Fail";   // fix: a string literal must bind to const char* in C++

    fprintf(pFile, " CUDA Device Query (Runtime API) version (CUDART static linking)\n\n");

    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
    if (error_id != cudaSuccess) {
        fprintf(pFile, "cudaGetDeviceCount returned %d\n-> %s\n",
                (int)error_id, cudaGetErrorString(error_id));
        fclose(pFile);        // fix: original leaked pFile on this early return
        ret = env->NewStringUTF(s);
        return ret;
    }

    // This function call returns 0 if there are no CUDA capable devices.
    if (deviceCount == 0) {
        fprintf(pFile, "There are no available device(s) that support CUDA\n");
    } else {
        fprintf(pFile, "Detected %d CUDA Capable device(s)\n", deviceCount);
    }

    int dev, driverVersion = 0, runtimeVersion = 0;
    for (dev = 0; dev < deviceCount; ++dev) {
        cudaSetDevice(dev);
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);

        fprintf(pFile, "\nDevice %d: \"%s\"\n", dev, deviceProp.name);

        // Console log
        cudaDriverGetVersion(&driverVersion);
        cudaRuntimeGetVersion(&runtimeVersion);
        fprintf(pFile, " CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
                driverVersion/1000, (driverVersion%100)/10,
                runtimeVersion/1000, (runtimeVersion%100)/10);
        fprintf(pFile, " CUDA Capability Major/Minor version number: %d.%d\n",
                deviceProp.major, deviceProp.minor);

        char msg[256];
        sprintf(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
                (float)deviceProp.totalGlobalMem/1048576.0f,
                (unsigned long long)deviceProp.totalGlobalMem);
        fprintf(pFile, "%s", msg);
        fprintf(pFile, " (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
                deviceProp.multiProcessorCount,
                _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
                _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
        fprintf(pFile, " GPU Clock rate: %.0f MHz (%0.2f GHz)\n",
                deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);

#if CUDART_VERSION >= 5000
        // This is supported in CUDA 5.0 (runtime API device properties)
        fprintf(pFile, " Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f);
        fprintf(pFile, " Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth);
        if (deviceProp.l2CacheSize) {
            fprintf(pFile, " L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize);
        }
#else
        // This only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API)
        int memoryClock;
        cuDeviceGetAttribute(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev);
        fprintf(pFile, " Memory Clock rate: %.0f Mhz\n", memoryClock * 1e-3f);
        int memBusWidth;
        cuDeviceGetAttribute(&memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev);
        fprintf(pFile, " Memory Bus Width: %d-bit\n", memBusWidth);
        int L2CacheSize;
        cuDeviceGetAttribute(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev);
        if (L2CacheSize) {
            fprintf(pFile, " L2 Cache Size: %d bytes\n", L2CacheSize);
        }
#endif

        fprintf(pFile, " Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n",
                deviceProp.maxTexture1D,
                deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
                deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
        fprintf(pFile, " Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n",
                deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
        fprintf(pFile, " Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n",
                deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1],
                deviceProp.maxTexture2DLayered[2]);
        fprintf(pFile, " Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
        fprintf(pFile, " Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
        fprintf(pFile, " Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
        fprintf(pFile, " Warp size: %d\n", deviceProp.warpSize);
        fprintf(pFile, " Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
        fprintf(pFile, " Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
        fprintf(pFile, " Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
                deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
        fprintf(pFile, " Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
                deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
        fprintf(pFile, " Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
        fprintf(pFile, " Texture alignment: %lu bytes\n", deviceProp.textureAlignment);
        fprintf(pFile, " Concurrent copy and kernel execution: %s with %d copy engine(s)\n",
                (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
        fprintf(pFile, " Run time limit on kernels: %s\n",
                deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
        fprintf(pFile, " Integrated GPU sharing Host Memory: %s\n",
                deviceProp.integrated ? "Yes" : "No");
        fprintf(pFile, " Support host page-locked memory mapping: %s\n",
                deviceProp.canMapHostMemory ? "Yes" : "No");
        fprintf(pFile, " Alignment requirement for Surfaces: %s\n",
                deviceProp.surfaceAlignment ? "Yes" : "No");
        fprintf(pFile, " Device has ECC support: %s\n",
                deviceProp.ECCEnabled ? "Enabled" : "Disabled");
#ifdef WIN32
        fprintf(pFile, " CUDA Device Driver Mode (TCC or WDDM): %s\n",
                deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)"
                                     : "WDDM (Windows Display Driver Model)");
#endif
        fprintf(pFile, " Device supports Unified Addressing (UVA): %s\n",
                deviceProp.unifiedAddressing ? "Yes" : "No");
        fprintf(pFile, " Device PCI Bus ID / PCI location ID: %d / %d\n",
                deviceProp.pciBusID, deviceProp.pciDeviceID);

        const char *sComputeMode[] = {
            "Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)",
            "Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)",
            "Prohibited (no host thread can use ::cudaSetDevice() with this device)",
            "Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)",
            "Unknown",
            NULL
        };
        fprintf(pFile, " Compute Mode:\n");
        fprintf(pFile, " < %s >\n", sComputeMode[deviceProp.computeMode]);
    }

    // If there are 2 or more GPUs, query to determine whether RDMA is supported
    if (deviceCount >= 2) {
        cudaDeviceProp prop[64];
        int gpuid[64];   // we want to find the first two GPU's that can support P2P
        int gpu_p2p_count = 0;

        for (int i = 0; i < deviceCount; i++) {
            checkCudaErrors(cudaGetDeviceProperties(&prop[i], i));

            // Only boards based on Fermi or later can support P2P
            if ((prop[i].major >= 2)
#ifdef _WIN32
                // on Windows (64-bit), the Tesla Compute Cluster driver for windows must be enabled to support this
                && prop[i].tccDriver
#endif
               ) {
                // This is an array of P2P capable GPUs
                gpuid[gpu_p2p_count++] = i;
            }
        }

        // Show all the combinations of support P2P GPUs
        int can_access_peer_0_1, can_access_peer_1_0;
        if (gpu_p2p_count >= 2) {
            for (int i = 0; i < gpu_p2p_count-1; i++) {
                for (int j = 1; j < gpu_p2p_count; j++) {
                    checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer_0_1, gpuid[i], gpuid[j]));
                    fprintf(pFile, "> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n",
                            prop[gpuid[i]].name, gpuid[i], prop[gpuid[j]].name, gpuid[j],
                            can_access_peer_0_1 ? "Yes" : "No");
                }
            }
            for (int j = 1; j < gpu_p2p_count; j++) {
                for (int i = 0; i < gpu_p2p_count-1; i++) {
                    checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer_1_0, gpuid[j], gpuid[i]));
                    fprintf(pFile, "> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n",
                            prop[gpuid[j]].name, gpuid[j], prop[gpuid[i]].name, gpuid[i],
                            can_access_peer_1_0 ? "Yes" : "No");
                }
            }
        }
    }

    // csv masterlog info
    // *****************************
    // exe and CUDA driver name
    fprintf(pFile, "\n");
    std::string sProfileString = "deviceQuery, CUDA Driver = CUDART";
    char cTemp[16];

    // driver version
    sProfileString += ", CUDA Driver Version = ";
#ifdef WIN32
    sprintf_s(cTemp, 10, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
#else
    sprintf(cTemp, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
#endif
    sProfileString += cTemp;

    // Runtime version
    sProfileString += ", CUDA Runtime Version = ";
#ifdef WIN32
    sprintf_s(cTemp, 10, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
#else
    sprintf(cTemp, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
#endif
    sProfileString += cTemp;

    // Device count
    sProfileString += ", NumDevs = ";
#ifdef WIN32
    sprintf_s(cTemp, 10, "%d", deviceCount);
#else
    sprintf(cTemp, "%d", deviceCount);
#endif
    sProfileString += cTemp;

    // Print Out all device Names
    for (dev = 0; dev < deviceCount; ++dev) {
#ifdef _WIN32
        sprintf_s(cTemp, 13, ", Device%d = ", dev);
#else
        sprintf(cTemp, ", Device%d = ", dev);
#endif
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);
        sProfileString += cTemp;
        sProfileString += deviceProp.name;
    }
    sProfileString += "\n";
    fprintf(pFile, "%s", sProfileString.c_str());
    fclose(pFile);

    // read back the text contents of the file
    long f_size;
    char *code;
    size_t code_s, result;
    FILE *fp = fopen("temp.txt", "r");
    fseek(fp, 0, SEEK_END);
    f_size = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    code_s = sizeof(char) * (f_size + 1);   // fix: room for the NUL terminator
    code = (char *)malloc(code_s);
    result = fread(code, 1, f_size, fp);
    code[result] = '\0';    // fix: NewStringUTF requires a NUL-terminated string;
                            // the original passed an unterminated buffer
    fclose(fp);             // fix: original leaked fp
    ret = env->NewStringUTF(code);
    free(code);             // fix: original leaked the read-back buffer
    return ret;
}
6a136eefa16776005e50b71621c760909f2951e0.hip
// !!! This is a file automatically generated by hipify!!! #include "./kern_impl.cuinl" namespace megdnn { namespace cuda { namespace cumsum { #define INST_(T, Op, exclusive, reverse) \ template void run_kern<T, Op, exclusive, reverse>( \ T*, void*, uint32_t, uint32_t, uint32_t, uint32_t, const Op&, \ hipStream_t) #define INST(T) \ INST_(T, SumOp<T>, true, true); \ INST_(T, SumOp<T>, false, true); \ INST_(T, SumOp<T>, true, false); \ INST_(T, SumOp<T>, false, false); #define cb(DType) INST(typename DTypeTrait<DType>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) } // namespace cumsum } // namespace cuda } // namespace megdnn // vim: ft=cuda syntax=cuda.doxygen
6a136eefa16776005e50b71621c760909f2951e0.cu
#include "./kern_impl.cuinl" namespace megdnn { namespace cuda { namespace cumsum { #define INST_(T, Op, exclusive, reverse) \ template void run_kern<T, Op, exclusive, reverse>( \ T*, void*, uint32_t, uint32_t, uint32_t, uint32_t, const Op&, \ cudaStream_t) #define INST(T) \ INST_(T, SumOp<T>, true, true); \ INST_(T, SumOp<T>, false, true); \ INST_(T, SumOp<T>, true, false); \ INST_(T, SumOp<T>, false, false); #define cb(DType) INST(typename DTypeTrait<DType>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) } // namespace cumsum } // namespace cuda } // namespace megdnn // vim: ft=cuda syntax=cuda.doxygen
0a289f8f55cb8271345643b89c48504d94740a54.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void ge_cdf_norm (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < fd); if (valid) { b[offset_b + gid_0 + gid_1 * ld_b] = CAST(normcdf)(a[offset_a + gid_0 + gid_1 * ld_a]); } }
0a289f8f55cb8271345643b89c48504d94740a54.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void ge_cdf_norm (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < fd); if (valid) { b[offset_b + gid_0 + gid_1 * ld_b] = CAST(normcdf)(a[offset_a + gid_0 + gid_1 * ld_a]); } }
71ae19459d3ed20038976289b266a49311f4ed4f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010-2013 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include "defs.h" // Define your kernels in this file you may use more than one kernel if you // need to // INSERT KERNEL(S) HERE __global__ void exclusiveScan(unsigned int *out, unsigned int* in, unsigned int*sum, unsigned int inputSize) { __shared__ unsigned int temp[2 * BLOCK_SIZE]; int start = 2 * blockIdx.x * blockDim.x; int tx = threadIdx.x; int index = 0; if (start + tx < inputSize) { temp[tx] = in[start + tx]; } else { temp[tx] = 0; } if (start + tx + blockDim.x < inputSize) { temp[tx + blockDim.x] = in[start + tx + blockDim.x]; } else { temp[tx + blockDim.x] = 0; } __syncthreads(); // reduction step int stride = 1; while(stride <= blockDim.x) { index = (tx + 1) * 2 * stride - 1; if (index < (2 * blockDim.x)) { temp[index] += temp[index - stride]; } stride *= 2; __syncthreads(); } // first store the reduction sum in sum array // make it zero since it is exclusive scan if (tx == 0) { // sum array contains the prefix sum of each // 2*blockDim blocks of element.. 
if (sum != NULL) { sum[blockIdx.x] = temp[2*blockDim.x - 1]; } temp[2*blockDim.x - 1] = 0; } //wait for thread zero to write __syncthreads(); // post scan step stride = blockDim.x; index = 0; unsigned int var = 0; while(stride > 0) { index = (2 * stride * (tx + 1)) - 1; if (index < 2 * blockDim.x) { var = temp[index]; temp[index] += temp[index - stride]; temp[index-stride] = var; } stride >>= 1; __syncthreads(); } // now write the temp array to output if (start + tx < inputSize) { out[start + tx] = temp[tx]; } if(start + tx + blockDim.x < inputSize) { out[start + tx + blockDim.x] = temp[tx + blockDim.x]; } } __global__ void mergeScanBlocks(unsigned int *sum, unsigned int* output, int opSize) { int index = 2 * blockDim.x * blockIdx.x + threadIdx.x; if (index < opSize) { output[index] += sum[blockIdx.x]; } if (index + blockDim.x < opSize) { output[index + blockDim.x] += sum[blockIdx.x]; } } /****************************************************************************** Setup and invoke your kernel(s) in this function. 
You may also allocate more GPU memory if you need to *******************************************************************************/ void preScan(unsigned int *out, unsigned int *in, unsigned int in_size) { // INSERT CODE HERE hipError_t ret; unsigned int numBlocks1 = in_size / BLOCK_SIZE; if (in_size % BLOCK_SIZE) numBlocks1++; int numBlocks2 = numBlocks1 / 2; if(numBlocks1 % 2) numBlocks2++; dim3 dimThreadBlock; dimThreadBlock.x = BLOCK_SIZE; dimThreadBlock.y = 1; dimThreadBlock.z = 1; dim3 dimGrid; dimGrid.x = numBlocks2; dimGrid.y = 1; dimGrid.z = 1; unsigned int*sumArr_d = NULL; if (in_size > (2*BLOCK_SIZE)) { // we need the sum auxilarry array only if numblocks2 > 1 ret = hipMalloc((void**)&sumArr_d, numBlocks2 * sizeof(unsigned int)); if (ret != hipSuccess) FATAL("unable to create sum array"); } hipLaunchKernelGGL(( exclusiveScan), dim3(dimGrid), dim3(dimThreadBlock), 0, 0, out, in, sumArr_d, in_size); ret = hipDeviceSynchronize(); if (ret != hipSuccess) FATAL("unable to launch scan kernel"); if (in_size <= (2*BLOCK_SIZE)) { // out has proper exclusive scan. just return return; } else { // now we need to perform exclusive scan on the auxilliary sum array unsigned int *sumArr_scan_d; ret = hipMalloc((void**)&sumArr_scan_d, numBlocks2 * sizeof(unsigned int)); if (ret != hipSuccess) FATAL("unable to create sum scan array"); preScan(sumArr_scan_d, sumArr_d, numBlocks2); // sumAdd_scan_d now contains the exclusive scan op of individual blocks // now just do a one-one addition of blocks hipLaunchKernelGGL(( mergeScanBlocks), dim3(dimGrid),dim3(dimThreadBlock), 0, 0, sumArr_scan_d, out, in_size); ret = hipDeviceSynchronize(); if (ret != hipSuccess) FATAL("unable to launch merge scan kernel"); hipFree(sumArr_d); hipFree(sumArr_scan_d); } }
71ae19459d3ed20038976289b266a49311f4ed4f.cu
/****************************************************************************** *cr *cr (C) Copyright 2010-2013 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include "defs.h" // Define your kernels in this file you may use more than one kernel if you // need to // INSERT KERNEL(S) HERE __global__ void exclusiveScan(unsigned int *out, unsigned int* in, unsigned int*sum, unsigned int inputSize) { __shared__ unsigned int temp[2 * BLOCK_SIZE]; int start = 2 * blockIdx.x * blockDim.x; int tx = threadIdx.x; int index = 0; if (start + tx < inputSize) { temp[tx] = in[start + tx]; } else { temp[tx] = 0; } if (start + tx + blockDim.x < inputSize) { temp[tx + blockDim.x] = in[start + tx + blockDim.x]; } else { temp[tx + blockDim.x] = 0; } __syncthreads(); // reduction step int stride = 1; while(stride <= blockDim.x) { index = (tx + 1) * 2 * stride - 1; if (index < (2 * blockDim.x)) { temp[index] += temp[index - stride]; } stride *= 2; __syncthreads(); } // first store the reduction sum in sum array // make it zero since it is exclusive scan if (tx == 0) { // sum array contains the prefix sum of each // 2*blockDim blocks of element.. 
if (sum != NULL) { sum[blockIdx.x] = temp[2*blockDim.x - 1]; } temp[2*blockDim.x - 1] = 0; } //wait for thread zero to write __syncthreads(); // post scan step stride = blockDim.x; index = 0; unsigned int var = 0; while(stride > 0) { index = (2 * stride * (tx + 1)) - 1; if (index < 2 * blockDim.x) { var = temp[index]; temp[index] += temp[index - stride]; temp[index-stride] = var; } stride >>= 1; __syncthreads(); } // now write the temp array to output if (start + tx < inputSize) { out[start + tx] = temp[tx]; } if(start + tx + blockDim.x < inputSize) { out[start + tx + blockDim.x] = temp[tx + blockDim.x]; } } __global__ void mergeScanBlocks(unsigned int *sum, unsigned int* output, int opSize) { int index = 2 * blockDim.x * blockIdx.x + threadIdx.x; if (index < opSize) { output[index] += sum[blockIdx.x]; } if (index + blockDim.x < opSize) { output[index + blockDim.x] += sum[blockIdx.x]; } } /****************************************************************************** Setup and invoke your kernel(s) in this function. 
You may also allocate more GPU memory if you need to *******************************************************************************/ void preScan(unsigned int *out, unsigned int *in, unsigned int in_size) { // INSERT CODE HERE cudaError_t ret; unsigned int numBlocks1 = in_size / BLOCK_SIZE; if (in_size % BLOCK_SIZE) numBlocks1++; int numBlocks2 = numBlocks1 / 2; if(numBlocks1 % 2) numBlocks2++; dim3 dimThreadBlock; dimThreadBlock.x = BLOCK_SIZE; dimThreadBlock.y = 1; dimThreadBlock.z = 1; dim3 dimGrid; dimGrid.x = numBlocks2; dimGrid.y = 1; dimGrid.z = 1; unsigned int*sumArr_d = NULL; if (in_size > (2*BLOCK_SIZE)) { // we need the sum auxilarry array only if numblocks2 > 1 ret = cudaMalloc((void**)&sumArr_d, numBlocks2 * sizeof(unsigned int)); if (ret != cudaSuccess) FATAL("unable to create sum array"); } exclusiveScan<<<dimGrid, dimThreadBlock>>>(out, in, sumArr_d, in_size); ret = cudaDeviceSynchronize(); if (ret != cudaSuccess) FATAL("unable to launch scan kernel"); if (in_size <= (2*BLOCK_SIZE)) { // out has proper exclusive scan. just return return; } else { // now we need to perform exclusive scan on the auxilliary sum array unsigned int *sumArr_scan_d; ret = cudaMalloc((void**)&sumArr_scan_d, numBlocks2 * sizeof(unsigned int)); if (ret != cudaSuccess) FATAL("unable to create sum scan array"); preScan(sumArr_scan_d, sumArr_d, numBlocks2); // sumAdd_scan_d now contains the exclusive scan op of individual blocks // now just do a one-one addition of blocks mergeScanBlocks<<<dimGrid,dimThreadBlock>>>(sumArr_scan_d, out, in_size); ret = cudaDeviceSynchronize(); if (ret != cudaSuccess) FATAL("unable to launch merge scan kernel"); cudaFree(sumArr_d); cudaFree(sumArr_scan_d); } }
f3ae95da6b0ef6f6336b17af1182973045537350.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "spn_diff_kernel.cuh" #include <iostream> #include <cstring> #include <hip/hip_cooperative_groups.h> using namespace cooperative_groups; namespace SPN_DIFF{ /* * BC specific permutation and DTT */ //Contains configuration (macro / c++ global variable) intended to be used across different translation unit // __shared__ unsigned long long perm_lookup_shared[MAX_SBOX][16][2]; __device__ unsigned long long perm_lookup_global_forward[MAX_SBOX][16]; __device__ unsigned long long perm_lookup_global_reversed[MAX_SBOX][16]; // __device__ unsigned long long perm_lookup_device[MAX_SBOX][16][2]; unsigned char perm_host[BLOCK_SIZE_BIT]; unsigned char perm_host_reversed[BLOCK_SIZE_BIT]; unsigned long long perm_lookup_host[MAX_SBOX][16]; //8192 bytes, 8KB, one SM can have 49KB should be fine unsigned long long perm_lookup_host_reversed[MAX_SBOX][16]; __shared__ unsigned int diff_table_shared[16][8]; //NOTE: init in kernel by 1st thread of the block. 
__device__ unsigned int diff_table_global[][8] = { {0x0 ,0x0 ,0x0 ,0x0 ,0x0 ,0x0 ,0x0 ,0x0}, {0x3 ,0x7 ,0x9 ,0xd ,0x0 ,0x0 ,0x0 ,0x0}, {0x5 ,0x3 ,0x6 ,0xa ,0xc ,0xd ,0xe ,0x0}, {0x6 ,0x1 ,0x3 ,0x4 ,0x7 ,0xa ,0xb ,0x0}, {0x5 ,0x6 ,0x7 ,0x9 ,0xa ,0xc ,0xe ,0x0}, {0xc ,0x1 ,0x4 ,0x9 ,0xa ,0xb ,0xd ,0x0}, {0xb ,0xf ,0x2 ,0x6 ,0x8 ,0xc ,0x0 ,0x0}, {0x1 ,0xf ,0x2 ,0x6 ,0x8 ,0xc ,0x0 ,0x0}, {0xb ,0xf ,0x3 ,0x7 ,0x9 ,0xd ,0x0 ,0x0}, {0x4 ,0xe ,0x2 ,0x6 ,0x8 ,0xc ,0x0 ,0x0}, {0x5 ,0x2 ,0x3 ,0x8 ,0xa ,0xd ,0xe ,0x0}, {0x8 ,0x1 ,0x4 ,0x9 ,0xa ,0xb ,0xd ,0x0}, {0x5 ,0x2 ,0x7 ,0x8 ,0x9 ,0xa ,0xe ,0x0}, {0x2 ,0x1 ,0x3 ,0x4 ,0x7 ,0xa ,0xb ,0x0}, {0x2 ,0x3 ,0x6 ,0x7 ,0x8 ,0x9 ,0xc ,0xd}, {0x1 ,0x4 ,0xe ,0xf ,0x0 ,0x0 ,0x0 ,0x0}, }; __device__ unsigned int diff_table_global_reversed[][8] = { {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, {0x7, 0xf, 0x3, 0x5, 0xb, 0xd, 0x0, 0x0}, {0xd, 0xe, 0xa, 0xc, 0x6, 0x7, 0x9, 0x0}, {0x1, 0x2, 0xe, 0x3, 0x8, 0xa, 0xd, 0x0}, {0x9, 0xf, 0x5, 0xb, 0x3, 0xd, 0x0, 0x0}, {0x2, 0x4, 0xa, 0xc, 0x0, 0x0, 0x0, 0x0}, {0x3, 0x4, 0x2, 0xe, 0x6, 0x7, 0x9, 0x0}, {0x1, 0x4, 0xc, 0x8, 0xe, 0x3, 0xd, 0x0}, {0xb, 0xa, 0xc, 0x6, 0x7, 0x9, 0xe, 0x0}, {0x1, 0x4, 0x5, 0xb, 0x8, 0xc, 0xe, 0x0}, {0x2, 0x4, 0x5, 0xa, 0xb, 0x3, 0xc, 0xd}, {0x6, 0x8, 0x5, 0xb, 0x3, 0xd, 0x0, 0x0}, {0x5, 0x2, 0x4, 0x6, 0x7, 0x9, 0xe, 0x0}, {0x1, 0x2, 0x8, 0xa, 0x5, 0xb, 0xe, 0x0}, {0x9, 0xf, 0x2, 0x4, 0xa, 0xc, 0x0, 0x0}, {0x6, 0x7, 0x8, 0xf, 0x0, 0x0, 0x0, 0x0} }; __shared__ float prob_table_shared[16][8]; //NOTE: init in kernel by 1st thread of the block. 
__device__ float prob_table_global[16][8]={ {1, 0, 0, 0, 0, 0, 0, 0}, {0.25f , 0.25f , 0.25f , 0.25f , 0, 0, 0, 0}, {0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0}, {0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0}, {0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0}, {0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0}, {0.25f , 0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0, 0}, {0.25f , 0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0, 0}, {0.25f , 0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0, 0}, {0.25f , 0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0, 0}, {0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0}, {0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0}, {0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0}, {0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0}, {0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f}, {0.25f , 0.25f , 0.25f , 0.25f , 0, 0, 0, 0} }; __device__ float prob_table_global_reversed[16][8]={ {1, 0, 0, 0, 0, 0, 0, 0}, {0.25, 0.25, 0.125, 0.125, 0.125, 0.125, 0, 0}, {0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0}, {0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0}, {0.25, 0.25, 0.125, 0.125, 0.125, 0.125, 0, 0}, {0.25, 0.25, 0.25, 0.25, 0, 0, 0, 0}, {0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0}, {0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0}, {0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0}, {0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0}, {0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125}, {0.25, 0.25, 0.125, 0.125, 0.125, 0.125, 0, 0}, {0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0}, {0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0}, {0.25, 0.25, 0.125, 0.125, 0.125, 0.125, 0, 0}, {0.25, 0.25, 0.25, 0.25, 0, 0, 0, 0} }; __shared__ unsigned int diff_table_size_shared[16]; __device__ unsigned int diff_table_size_global[16] = {1 , 4 , 7 , 7 , 7 , 7 , 6 , 6 , 6 , 6 , 7 , 7 , 7 , 7 , 8 , 4}; __device__ unsigned int 
diff_table_size_global_reversed[16] = {1, 6, 7, 7, 6, 4, 7, 7, 7, 7, 8, 6, 7, 7, 6, 4}; unsigned int diff_table_size_host[16]= {1 , 4 , 7 , 7 , 7 , 7 , 6 , 6 , 6 , 6 , 7 , 7 , 7 , 7 , 8 , 4 }; unsigned int diff_table_size_host_reversed[16] = {1, 6, 7, 7, 6, 4, 7, 7, 7, 7, 8, 6, 7, 7, 6, 4}; __shared__ unsigned long long branch_size_block_shared[1]; __shared__ float prob_per_as_shared[32]; //MAX_AS __shared__ float prob_per_round_remaining_shared[32]; //MAX_ROUND_FORWARD /* * DX and DY changes */ //Constant memory because it is accessed by the same warp @ the same addresses. (broadcasting) else request will be serialized __constant__ unsigned char final_dy_constant[16] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6, 0x0, 0x6, 0x0 }; unsigned char final_dy_host[16] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6, 0x0, 0x6, 0x0 }; unsigned char ref_dx_host[16] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; __constant__ float CLUSTER_PROB_BOUND_const = 0; }; GPU_Kenerl_t::GPU_Kenerl_t(int gpu_id, bool is_MITM_used){ //Create its own stream.. hipStreamCreate( &(this->stream_obj) ); //DEBUG: Set prinf limit 10MB // hipDeviceSetLimit(hipLimitPrintfFifoSize, 10000000); //Called @ different GPU threads (each with its own cpu thread) auto cudaStatus = hipSetDevice(gpu_id); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
CudaDeviceNumber :%d", gpu_id ); goto Error; } std::cout << "\nTransfered perm_LUhost from host to device"; cudaStatus = hipMemcpyToSymbol(SPN_DIFF::perm_lookup_global_forward, SPN_DIFF::perm_lookup_host, sizeof(unsigned long long)*16*16); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy perm_lookup_global_forward failed!"); goto Error; } std::cout << "\nTransfered perm_LUhost Reversed from host to device"; cudaStatus = hipMemcpyToSymbol(SPN_DIFF::perm_lookup_global_reversed, SPN_DIFF::perm_lookup_host_reversed, sizeof(unsigned long long)*16*16); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy perm_lookup_global_reversed failed!"); goto Error; } //Allocate Memory HERE //Input Allocation cudaStatus = hipMalloc((void**)& device_dx, sizeof(unsigned char)* 16 * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_dx @init failed!"); goto Error; } cudaStatus = hipMemset(device_dx, 0, sizeof(unsigned char)* 16 * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_dx failed!"); goto Error; } cudaStatus = hipMalloc((void**)& device_sbox_index, sizeof(int)* MAX_AS * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_sbox_index @init failed!"); goto Error; } cudaStatus = hipMemset(device_sbox_index, 0, sizeof(int) * MAX_AS * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_sbox_index failed!"); goto Error; } cudaStatus = hipMalloc((void**)& device_sbox_num, sizeof(int) * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_sbox_num @init failed!"); goto Error; } cudaStatus = hipMemset(device_sbox_num, 0, sizeof(int) * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_sbox_num failed!"); goto Error; } cudaStatus = 
hipMalloc((void**)& device_prob, sizeof(float) * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_prob @init failed!"); goto Error; } cudaStatus = hipMemset(device_prob, 0, sizeof(float) * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_prob failed!"); goto Error; } cudaStatus = hipMalloc((void**)& device_branch_size, sizeof(int) * ( (MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD) + 4) ); // + 4 to accomodate 4 loading @ same time if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_branch_size @init failed!"); goto Error; } cudaStatus = hipMemset(device_branch_size, 0, sizeof(int) * ( (MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD) + 4) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_branch_size failed!"); goto Error; } cudaStatus = hipMalloc((void**)& device_branch_size_thread, sizeof(unsigned long long) * ( (GRID_THREAD_SIZE * MAX_ROUND_FORWARD) + 4) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_branch_size_thread @init failed!"); goto Error; } cudaStatus = hipMemset(device_branch_size_thread, 0, sizeof(unsigned long long) * ( (GRID_THREAD_SIZE * MAX_ROUND_FORWARD) + 4) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_branch_size_thread failed!"); goto Error; } cudaStatus = hipMalloc((void**)& device_branch_size_block, sizeof(unsigned long long) * ( (BLOCK_NUM * MAX_ROUND_FORWARD) + 4 + 2) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_branch_size_block @init failed!"); goto Error; } cudaStatus = hipMemset(device_branch_size_block, 0, sizeof(unsigned long long) * ( (BLOCK_NUM * MAX_ROUND_FORWARD) + 4 + 2) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_branch_size_block failed!"); goto Error; } cudaStatus = hipMalloc((void**)& device_total_branch_size_block, sizeof(unsigned long long) * ( ((BLOCK_NUM * MAX_ROUND_FORWARD) + 1) * 2) ); if 
(cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_branch_size_block @init failed!"); goto Error; } cudaStatus = hipMemset(device_total_branch_size_block, 0, sizeof(unsigned long long) * ( ((BLOCK_NUM * MAX_ROUND_FORWARD) + 1) * 2) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_branch_size_block failed!"); goto Error; } //Final (Needs to be reduced) Output Allocation cudaStatus = hipMalloc((void**)& device_cluster_size_final, sizeof(unsigned long long)* THREAD_PER_BLOCK * BLOCK_NUM); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_cluster_size_final @init failed!"); goto Error; } cudaStatus = hipMemset(device_cluster_size_final, 0, sizeof(unsigned long long)* THREAD_PER_BLOCK * BLOCK_NUM); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_cluster_size_final failed!"); goto Error; } cudaStatus = hipMalloc((void**)& device_prob_final, sizeof(double)* THREAD_PER_BLOCK * BLOCK_NUM); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_prob_final @init failed!"); goto Error; } cudaStatus = hipMemset(device_prob_final, 0, sizeof(double)* THREAD_PER_BLOCK * BLOCK_NUM); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_prob_final failed!"); goto Error; } //MITM Allocation if (is_MITM_used){ cudaStatus = hipMalloc((void**)& MITM_prob_interm_global, sizeof(float)* GPU_Kenerl_t::MITM_size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc MITM_prob_interm_global @init failed!"); goto Error; } cudaStatus = hipMemset(MITM_prob_interm_global, 0, sizeof(float)* GPU_Kenerl_t::MITM_size ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset MITM_prob_interm_global failed!"); goto Error; } cudaStatus = hipMalloc((void**)& MITM_size_interm_global, sizeof(unsigned long long)* GPU_Kenerl_t::MITM_size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc MITM_prob_interm_global @init failed!"); goto Error; } cudaStatus = hipMemset(MITM_size_interm_global, 0, 
sizeof(unsigned long long)* GPU_Kenerl_t::MITM_size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset MITM_prob_interm_global failed!"); goto Error; } } //Intermediate sync variable cudaStatus = hipMalloc((void**)& device_last_dx_ptr, sizeof(int) * ( 2) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_last_dx_ptr @init failed!"); goto Error; } cudaStatus = hipMemset(device_last_dx_ptr, 0, sizeof(int) * (2) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_last_dx_ptr failed!"); goto Error; } cudaStatus = hipMalloc((void**)& device_branches_sum_before_dx, sizeof(unsigned long long) * ( 2 + 2 + 2 ) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_branches_sum_before_dx @init failed!"); goto Error; } cudaStatus = hipMemset(device_branches_sum_before_dx, 0, sizeof(unsigned long long) * (2 + 2 + 2 ) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_branches_sum_before_dx failed!"); goto Error; } cudaStatus = hipMalloc((void**)& device_has_operation, (sizeof(bool) * 2 ) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc device_has_operation @init failed!"); goto Error; } cudaStatus = hipMemset(device_has_operation, 0, (sizeof(bool) * 2) ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_has_operation failed!"); goto Error; } //Finished std::cout <<"\n----\n"; return; Error: std::cout << "\nCritical CUDA Error. 
"; if (cudaStatus != hipSuccess) { hipError_t err = hipGetLastError(); std::cout << "\nCRITICAL ERROR from init..."; fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, hipGetErrorName(err), hipGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } }; //Kernel - true_round used for bounding // __launch_bounds__(THREAD_PER_BLOCK, 16) //FOR V100,P100 __global__ void kernel_diff( unsigned char* device_dx, int* device_sbox_index, float* device_prob, int* device_branch_size, unsigned long long* device_cluster_size_final, double* device_prob_final, int* device_last_dx_ptr, bool* device_has_operation, unsigned long long* device_branches_sum_before_dx, unsigned long long* device_branch_size_thread, unsigned long long* device_branch_size_block, unsigned long long* device_total_branch_size_block){ // printf("\nInteger: %i, %i, block_size (threads per blocks) : %i",threadIdx.x, blockIdx.x,blockDim.x); grid_group grid = this_grid(); if (threadIdx.x <32){ if (threadIdx.x<16){ SPN_DIFF::diff_table_size_shared[threadIdx.x] = SPN_DIFF::diff_table_size_global[threadIdx.x]; for (int j = 0; j < 8; j++) { SPN_DIFF::diff_table_shared[threadIdx.x][j] = SPN_DIFF::diff_table_global[threadIdx.x][j]; SPN_DIFF::prob_table_shared[threadIdx.x][j] = SPN_DIFF::prob_table_global[threadIdx.x][j]; } } SPN_DIFF::prob_per_as_shared[threadIdx.x] = powf(CLUSTER_1AS_BEST_PROB, threadIdx.x+1); SPN_DIFF::prob_per_round_remaining_shared[threadIdx.x] = powf(CLUSTER_PROB_INDIV, threadIdx.x); } __syncthreads(); //Computing target array index (id and output_range) //I - THREAD ID / total thread (including all block) - Used to coordinate splitting of tasks const int thread_id_default = (blockIdx.x * blockDim.x) + threadIdx.x; int cur_r = 0; int flip_0_1 = 0; // int flip_iter = 0; long long cur_iter = -1; //This has to be signed int dx_ptr_shared[MAX_ROUND_FORWARD]; // int dx_ptr[MAX_ROUND_FORWARD] = {0}; //From 0- DX_num for that rounds unsigned long long 
branch_sum_before_dx_ptr_shared[MAX_ROUND_FORWARD] = {0}; unsigned long long branch_sum_before_block_thread_ptr_shared[MAX_ROUND_FORWARD] = {0}; unsigned long long branch_sum_before_block_ptr_shared[MAX_ROUND_FORWARD] = {0}; long long iter_shared[MAX_ROUND_FORWARD]; //Has to be signed //Preparation //Prepare array to store, each entry to next rounds will require the storing and restoring of these into local memory int thread_id_arr[MAX_ROUND_FORWARD]; //Each round GRID_THREAD_SIZE is added to this int cur_thread_id; //Default value does not matter int dx_ptr = 0; unsigned long long branch_sum_before_dx_ptr = 0; unsigned long long branch_sum_before_block_thread_ptr = 0; //From to 0-Block_num for that rounds // unsigned long long branch_sum_before_block_ptr = 0; //IO, need to be retarget after every rounds. //Output //NOTE: 32 here need to be changed #define output_dx(x) (( device_dx + ( 16 * MAX_PATH_PER_ROUND * (cur_r+1)) + (16 * thread_id_default * MAX_SPACE_PER_THREAD) + (x * 16) )) #define output_sbox_index(x) (( device_sbox_index + ( MAX_AS * MAX_PATH_PER_ROUND * (cur_r+1) ) + (MAX_AS* thread_id_default * MAX_SPACE_PER_THREAD) + (x * MAX_AS) )) #define output_prob(x) (( device_prob + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x)) #define output_branch_size(x) (( device_branch_size + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x )) #define output_branch_size_thread() ((device_branch_size_thread + ( GRID_THREAD_SIZE * (cur_r+1) ) + thread_id_default)) #define output_branch_size_block() ((device_branch_size_block + (BLOCK_NUM * (cur_r+1)) + blockIdx.x)) #define output_branch_size_all() ((device_branch_size_block-2)) // unsigned long long* output_branch_size_all = (device_branch_size_block-2); //Input #define cur_dx() ( ( device_dx + (16 * MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_sbox_index() ( ( device_sbox_index + (MAX_AS * MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_prob() ( ( 
device_prob + (MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_branch_size() ( ( device_branch_size + (MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_branch_size_thread() ( ( device_branch_size_thread + (GRID_THREAD_SIZE * cur_r) ) ) #define cur_branch_size_block() ( (device_branch_size_block + (BLOCK_NUM * cur_r)) ) //Mainloop - while(true){ //Base case, cur_0 and reamining == 0 bool has_operation = false; int increment = 0; //Determine output save position.. unsigned long long thread_branch_num_so_far = 0; //Allow accumulaction of block_thread branch_num (atomic_add with each reset) if(cur_iter == -1){ cur_iter = output_branch_size_all()[flip_0_1]/GRID_THREAD_SIZE + (output_branch_size_all()[flip_0_1] % GRID_THREAD_SIZE != 0); cur_thread_id = thread_id_default * cur_iter; } //calculate block_thread_ptr.. initial int block_thread_ptr = dx_ptr/MAX_SPACE_PER_THREAD; int block_ptr = block_thread_ptr / THREAD_PER_BLOCK; //Find the correct DX with three layer int loop_limit = cur_iter<MAX_SPACE_PER_THREAD?cur_iter:MAX_SPACE_PER_THREAD; for (int i=0;i<loop_limit;i++){ if (dx_ptr < MAX_PATH_PER_ROUND){ branch_sum_before_block_thread_ptr = branch_sum_before_block_thread_ptr_shared[cur_r]; //Shortcut if (cur_thread_id < (cur_branch_size()[dx_ptr] + branch_sum_before_dx_ptr) && cur_thread_id < (cur_branch_size_thread()[block_thread_ptr] + branch_sum_before_block_thread_ptr) ){ goto finfinddx; } //Find the correct block unsigned long long cur_branch_size_reg0,cur_branch_size_reg1,cur_branch_size_reg2,cur_branch_size_reg3; unsigned long long cur_branch_size_reg[4]; unsigned long long branches_temp = branch_sum_before_block_ptr_shared[cur_r]; int initial_block_ptr = block_ptr; while(true){ cur_branch_size_reg0 = cur_branch_size_block()[block_ptr]; cur_branch_size_reg1 = cur_branch_size_block()[block_ptr+1]; cur_branch_size_reg2 = cur_branch_size_block()[block_ptr+2]; cur_branch_size_reg3 = cur_branch_size_block()[block_ptr+3]; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; 
//Indicate nothing to do, end of dx goto finfinddx; } branches_temp += cur_branch_size_reg0; if (cur_branch_size_reg0 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg0; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx goto finfinddx; } branches_temp += cur_branch_size_reg1; if (cur_branch_size_reg1 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg1; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx goto finfinddx; } branches_temp += cur_branch_size_reg2; if (cur_branch_size_reg2 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg2; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx goto finfinddx; } branches_temp += cur_branch_size_reg3; if (cur_branch_size_reg3 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg3; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; } if (true){ hasdx: int initial_block_thread_ptr = block_thread_ptr; if (initial_block_ptr == block_ptr){ //Found out block does not move branches_temp = branch_sum_before_block_thread_ptr; //Take the old branch_size for block_thread (start off with offset block thread) //block_thread_ptr remain unchanged... } else{ //New Block block_thread_ptr = block_ptr * THREAD_PER_BLOCK; //Point to the 1st element of the block //branches_temp remain unchanged because we are starting at 1st element... 
} //Find the correct block thread bool is_found = false; while(!is_found){ cur_branch_size_reg[0] = cur_branch_size_thread()[block_thread_ptr]; cur_branch_size_reg[1] = cur_branch_size_thread()[block_thread_ptr+1]; cur_branch_size_reg[2] = cur_branch_size_thread()[block_thread_ptr+2]; cur_branch_size_reg[3] = cur_branch_size_thread()[block_thread_ptr+3]; #pragma unroll for (int i=0;i<4;i++){ branches_temp += cur_branch_size_reg[i]; if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg[i]; block_thread_ptr += i; is_found = true; break; } } if(!is_found){ block_thread_ptr += 4; } } branch_sum_before_block_thread_ptr_shared[cur_r] = branches_temp; //Advance the dx position if needed (different block_thread location) if (block_thread_ptr == initial_block_thread_ptr){ //Start at the same location branches_temp = branch_sum_before_dx_ptr; } else{ dx_ptr = block_thread_ptr * MAX_SPACE_PER_THREAD; } //Find the correct dx position is_found = false; while(!is_found){ cur_branch_size_reg[0] = cur_branch_size()[dx_ptr]; cur_branch_size_reg[1] = cur_branch_size()[dx_ptr+1]; cur_branch_size_reg[2] = cur_branch_size()[dx_ptr+2]; cur_branch_size_reg[3] = cur_branch_size()[dx_ptr+3]; #pragma unroll for (int i=0;i<4;i++){ branches_temp += cur_branch_size_reg[i]; if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg[i]; dx_ptr += i; is_found = true; break; } } if(!is_found){ dx_ptr += 4; } } branch_sum_before_dx_ptr = branches_temp; } else{ //Nothing here } } finfinddx: ; if (dx_ptr < MAX_PATH_PER_ROUND){ //If dx_ptr is within dx_num, [0-N) has_operation = true; float prob_thread = 1.0; int divide_factor = 1; unsigned int diff_freq_index; //0-16 only unsigned int remaining_value = cur_thread_id - branch_sum_before_dx_ptr ; //7^8 is less than 32 bit... 
unsigned char* cur_dx_temp = cur_dx() + ( 16 * dx_ptr ) ; //NOTE: Need to modify to fit datastruct of different cipher int* cur_sbox_index_ptr = cur_sbox_index() + (MAX_AS * dx_ptr); unsigned char cur_thread_partial_dy[17]; cur_thread_partial_dy[16] = {0}; memcpy(cur_thread_partial_dy,cur_dx_temp,16); int cur_sbox_index_temp[MAX_AS]; memcpy(cur_sbox_index_temp, cur_sbox_index_ptr, sizeof(int) * MAX_AS); //Points to correct i_th branches of j_dx and so subs #pragma unroll for (int i = 0; i < MAX_AS; i++) { unsigned char cur_val = cur_thread_partial_dy[cur_sbox_index_temp[i]]; diff_freq_index = (remaining_value / divide_factor) % SPN_DIFF::diff_table_size_shared[cur_val]; cur_thread_partial_dy[cur_sbox_index_temp[i]] = SPN_DIFF::diff_table_shared[cur_val][diff_freq_index]; //Assigning target val to partial_dy prob_thread *= (SPN_DIFF::prob_table_shared[cur_val][diff_freq_index]); divide_factor *= SPN_DIFF::diff_table_size_shared[cur_val]; } prob_thread *= (*(cur_prob() + dx_ptr)); if (cur_r+1 != MAX_ROUND_FORWARD){ //Do Permutate unsigned long long front_64 = 0; #pragma unroll for (int i = 0; i < 16; i++) { if ( cur_thread_partial_dy[i] > 0) { front_64 |= SPN_DIFF::perm_lookup_global_forward[i][cur_thread_partial_dy[i]]; } } #pragma unroll for (int i = 0; i < 16; i++) { cur_thread_partial_dy[i] = (front_64 >> ((15 - i) * 4)) & 0xf; } //Calculte sbox index and sbox number int save_sbox_num = 0; int save_branch_size = 1; int save_sbox_index[16]; //Will point to non existance 32 array entry (see substitution below) #pragma unroll for (int i=0;i< 16;i++){ save_sbox_index[i] = 16; } #pragma unroll for (int i = 0; i < 16; i++) { if ((cur_thread_partial_dy[i] & 0xf) > 0) { save_branch_size *= SPN_DIFF::diff_table_size_shared[cur_thread_partial_dy[i]]; save_sbox_index[save_sbox_num] = i; save_sbox_num++; } } //Pruning // if(true){ if (save_sbox_num <= MAX_AS){ //If only next round AS <= 8 //MATSUI BOUND float estimated_com_prob = 
SPN_DIFF::prob_per_round_remaining_shared[(MAX_ROUND_FORWARD - cur_r - 2)] * SPN_DIFF::prob_per_as_shared[save_sbox_num-1]; //NOTE: this bound is less tight when round entered is not zero.. if ((estimated_com_prob * prob_thread) >= SPN_DIFF::CLUSTER_PROB_BOUND_const) { // if (true) { memcpy(output_dx(increment),cur_thread_partial_dy,16); *output_prob(increment) = prob_thread; memcpy(output_sbox_index(increment), save_sbox_index, sizeof(int) * MAX_AS ); *output_branch_size(increment) = save_branch_size; thread_branch_num_so_far += save_branch_size; increment += 1; } // else{ *output_branch_size = 0;} } // else{ *output_branch_size = 0;} } //LAST ROUNDS... no permutation and straight to savings. else{ bool is_same = true; #pragma unroll for (int i=0;i<16;i++){ if (SPN_DIFF::final_dy_constant[i] != cur_thread_partial_dy[i]){ is_same= false; break; } } if (is_same){ device_prob_final[thread_id_default] += prob_thread; device_cluster_size_final[thread_id_default] += 1; } } } cur_thread_id+=1; } cur_thread_id-=1; cur_iter = cur_iter - loop_limit; if(thread_id_default == 0){ *(device_has_operation + (flip_0_1) ) = has_operation; } if (cur_r != MAX_ROUND_FORWARD-1){ *output_branch_size_thread() += thread_branch_num_so_far; // so_far will be reset each sync thus adding like this is correct. 
atomicAdd(&SPN_DIFF::branch_size_block_shared[0], thread_branch_num_so_far); __syncthreads(); if (threadIdx.x==0){ // iter_shared[cur_r] = cur_iter; *output_branch_size_block() += SPN_DIFF::branch_size_block_shared[0]; //Since the operation is once per output round, reset the stuff here atomicAdd( (output_branch_size_all()+!flip_0_1), SPN_DIFF::branch_size_block_shared[0]); SPN_DIFF::branch_size_block_shared[0] = 0; } } grid.sync(); //Wait for grid to synchronize before continue if (thread_id_default==0){ output_branch_size_all()[flip_0_1] = 0; } grid.sync(); // has_operation = *(device_has_operation + (flip_0_1) ); flip_0_1 = !flip_0_1; if(true){ if (cur_r != MAX_ROUND_FORWARD-1 && has_operation){ //Is not last round and has operation //Goes forwards iter_shared[cur_r] = cur_iter; dx_ptr_shared[cur_r] = dx_ptr; branch_sum_before_dx_ptr_shared[cur_r] = branch_sum_before_dx_ptr; thread_id_arr[cur_r] = cur_thread_id; cur_r+=1; // cur_thread_id = thread_id_default; //NOTE: does not matter dx_ptr = 0; branch_sum_before_dx_ptr = 0; branch_sum_before_block_thread_ptr_shared[cur_r] = 0; branch_sum_before_block_ptr_shared[cur_r] = 0; cur_iter = -1; //Signal the requirement of intiialization if (cur_r!=MAX_ROUND_FORWARD-1){ *output_branch_size_thread() = 0; //HAs to be reset because of tunneling if (threadIdx.x==0){ *output_branch_size_block() = 0; } } } else if(!has_operation || (cur_r == MAX_ROUND_FORWARD-1 && cur_iter == 0) ){ //Has no operation => cur_iter == 0, //Goes backwards if last rounds or current rounds does not process anythings. 
//--- Tail of the preceding (forward, non-MITM) kernel's round state machine; its head is above this chunk. ---
// Backtrack: pop rounds until one with remaining iterations is found; cur_r == -1 means the whole DFS is done.
do{
    cur_r-=1;
    if(cur_r == -1){
        return; //NOTE: Completed computation, Base Case
    }
    cur_iter = iter_shared[cur_r];
}while(cur_iter==0);
// Restore the saved per-round cursor state and resume at the next work item.
cur_thread_id = thread_id_arr[cur_r] + 1;
dx_ptr = dx_ptr_shared[cur_r];
branch_sum_before_dx_ptr = branch_sum_before_dx_ptr_shared[cur_r];
*output_branch_size_thread() = 0;
if (threadIdx.x==0){
    *output_branch_size_block() = 0;
}
}
else{
    //Has operation and is last round and cur_iter != 0
    //Repeat last rounds.
    cur_thread_id += 1;
}
}
}
};

// Forward phase of the meet-in-the-middle (MITM) differential-cluster search.
// Expands differential paths round by round (DFS over rounds, breadth within a
// round split across the whole grid); at round MAX_ROUND_FORWARD-1 it does not
// store paths but accumulates each reachable intermediate difference (up to 3
// active s-boxes) into MITM_prob_interm_global / MITM_size_interm_global, to be
// matched later by kernel_diff_mitm_backward.
// NOTE(review): grid.sync() below requires a cooperative launch
// (cudaLaunchCooperativeKernel) — confirm at the call site.
__launch_bounds__(THREAD_PER_BLOCK, 8) //FOR V100,P100
__global__ void kernel_diff_mitm(
    unsigned char* device_dx,                          // per-round path differences, 16 nibbles (bytes) per path
    int* device_sbox_index,                            // per-round active s-box positions, MAX_AS per path
    float* device_prob,                                // per-round accumulated path probability
    int* device_branch_size,                           // per-path branch count (product of DDT row sizes)
    unsigned long long* device_cluster_size_final,     // per-thread count of matched trails (unused in this forward phase)
    double* device_prob_final,                         // per-thread accumulated probability (unused in this forward phase)
    int* device_last_dx_ptr,
    bool* device_has_operation,                        // double-buffered "any work left" flags, indexed by flip_0_1
    unsigned long long* device_branches_sum_before_dx,
    unsigned long long* device_branch_size_thread,     // per-round, per-thread branch totals (middle search layer)
    unsigned long long* device_branch_size_block,      // per-round, per-block branch totals (coarse search layer); [-2,-1] hold grid-wide totals
    unsigned long long* device_total_branch_size_block,
    float* MITM_prob_interm_global,                    // MITM meeting table: probability per packed intermediate difference
    unsigned long long* MITM_size_interm_global){      // MITM meeting table: path count per packed intermediate difference
    // printf("\nInteger: %i, %i, block_size (threads per blocks) : %i",threadIdx.x, blockIdx.x,blockDim.x);
    grid_group grid = this_grid();
    // Stage the difference-distribution tables and the Matsui-style pruning
    // bounds into shared memory (first 16 threads copy the DDT rows, first 32
    // fill the bound tables).
    if (threadIdx.x <32){
        if (threadIdx.x<16){
            SPN_DIFF::diff_table_size_shared[threadIdx.x] = SPN_DIFF::diff_table_size_global[threadIdx.x];
            for (int j = 0; j < 8; j++) {
                SPN_DIFF::diff_table_shared[threadIdx.x][j] = SPN_DIFF::diff_table_global[threadIdx.x][j];
                SPN_DIFF::prob_table_shared[threadIdx.x][j] = SPN_DIFF::prob_table_global[threadIdx.x][j];
            }
        }
        SPN_DIFF::prob_per_as_shared[threadIdx.x] = powf(CLUSTER_1AS_BEST_PROB, threadIdx.x+1);
        SPN_DIFF::prob_per_round_remaining_shared[threadIdx.x] = powf(CLUSTER_PROB_INDIV, threadIdx.x);
    }
    __syncthreads(); //wait for init to be finished, sync up all threads within a block... shared memory lies within each block.
    //Computing target array index (id and output_range)
    //I - THREAD ID / total thread (including all block) - Used to coordinate splitting of tasks
    const int thread_id_default = (blockIdx.x * blockDim.x) + threadIdx.x;
    int cur_r = 0;                      // current round (DFS depth)
    int flip_0_1 = 0;                   // selects which of the two double-buffered global flags/totals is "current"
    long long cur_iter = -1; //This has to be signed
    int dx_ptr_shared[MAX_ROUND_FORWARD];
    unsigned long long branch_sum_before_dx_ptr_shared[MAX_ROUND_FORWARD] = {0};
    unsigned long long branch_sum_before_block_thread_ptr_shared[MAX_ROUND_FORWARD] = {0};
    unsigned long long branch_sum_before_block_ptr_shared[MAX_ROUND_FORWARD] = {0};
    long long iter_shared[MAX_ROUND_FORWARD]; //Has to be signed
    //Preparation
    //Prepare array to store, each entry to next rounds will require the storing and restoring of these into local memory
    int thread_id_arr[MAX_ROUND_FORWARD]; //Each round GRID_THREAD_SIZE is added to this
    int cur_thread_id; //Default value does not matter
    // int dx_ptr[MAX_ROUND_FORWARD] = {0}; //From 0- DX_num for that rounds
    int dx_ptr = 0;
    unsigned long long branch_sum_before_dx_ptr = 0;
    unsigned long long branch_sum_before_block_thread_ptr = 0; //From to 0-Block_num for that rounds
    // unsigned long long branch_sum_before_block_ptr = 0;
    //IO, need to be retarget after every rounds.
    //Output
#define output_dx(x) (( device_dx + ( 16 * MAX_PATH_PER_ROUND * (cur_r+1)) + (16 * thread_id_default * MAX_SPACE_PER_THREAD) + (x * 16) ))
#define output_sbox_index(x) (( device_sbox_index + ( MAX_AS * MAX_PATH_PER_ROUND * (cur_r+1) ) + (MAX_AS* thread_id_default * MAX_SPACE_PER_THREAD) + (x * MAX_AS) ))
#define output_prob(x) (( device_prob + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x))
#define output_branch_size(x) (( device_branch_size + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x ))
#define output_branch_size_thread() ((device_branch_size_thread + ( GRID_THREAD_SIZE * (cur_r+1) ) + thread_id_default))
#define output_branch_size_block() ((device_branch_size_block + (BLOCK_NUM * (cur_r+1)) + blockIdx.x))
#define output_branch_size_all() ((device_branch_size_block-2))
    // unsigned long long* output_branch_size_all = (device_branch_size_block-2);
    //Input
#define cur_dx() ( ( device_dx + (16 * MAX_PATH_PER_ROUND * cur_r) ) )
#define cur_sbox_index() ( ( device_sbox_index + (MAX_AS * MAX_PATH_PER_ROUND * cur_r) ) )
#define cur_prob() ( ( device_prob + (MAX_PATH_PER_ROUND * cur_r) ) )
#define cur_branch_size() ( ( device_branch_size + (MAX_PATH_PER_ROUND * cur_r) ) )
#define cur_branch_size_thread() ( ( device_branch_size_thread + (GRID_THREAD_SIZE * cur_r) ) )
#define cur_branch_size_block() ( (device_branch_size_block + (BLOCK_NUM * cur_r)) )
    //Mainloop -
    while(true){
        //Base case, cur_0 and reamining == 0
        bool has_operation = false;
        int increment = 0;
        //Determine output save position..
        unsigned long long thread_branch_num_so_far = 0; //Allow accumulaction of block_thread branch_num (atomic_add with each reset)
        // First visit of this round: split the round's total branch count evenly
        // across all grid threads (ceil-div) and derive this thread's first id.
        if(cur_iter == -1){
            // cur_iter = ceil(1.0 * output_branch_size_all()[flip_0_1]/GRID_THREAD_SIZE);
            cur_iter = output_branch_size_all()[flip_0_1]/GRID_THREAD_SIZE + (output_branch_size_all()[flip_0_1] % GRID_THREAD_SIZE != 0);
            cur_thread_id = thread_id_default * cur_iter;
        }
        //calculate block_thread_ptr.. initial
        int block_thread_ptr = dx_ptr/MAX_SPACE_PER_THREAD;
        int block_ptr = block_thread_ptr / THREAD_PER_BLOCK;
        //Find the correct DX with three layer
        // Three-layer prefix-sum search (block -> thread -> dx) locates the path
        // whose cumulative branch range contains cur_thread_id.
        int loop_limit = cur_iter<MAX_SPACE_PER_THREAD?cur_iter:MAX_SPACE_PER_THREAD;
        for (int i=0;i<loop_limit;i++){
            if (dx_ptr < MAX_PATH_PER_ROUND){
                branch_sum_before_block_thread_ptr = branch_sum_before_block_thread_ptr_shared[cur_r];
                //Shortcut
                if (cur_thread_id < (cur_branch_size()[dx_ptr] + branch_sum_before_dx_ptr) && cur_thread_id < (cur_branch_size_thread()[block_thread_ptr] + branch_sum_before_block_thread_ptr) ){
                    goto finfinddx;
                }
                //Find the correct block
                // Hand-unrolled x4 scan over per-block branch totals.
                // NOTE(review): entries [block_ptr+1..+3] are loaded before the
                // block_ptr >= BLOCK_NUM check — safe only if the array carries
                // >=3 slack entries past BLOCK_NUM; confirm at allocation.
                unsigned long long cur_branch_size_reg0,cur_branch_size_reg1,cur_branch_size_reg2,cur_branch_size_reg3;
                unsigned long long cur_branch_size_reg[4];
                unsigned long long branches_temp = branch_sum_before_block_ptr_shared[cur_r];
                int initial_block_ptr = block_ptr;
                while(true){
                    cur_branch_size_reg0 = cur_branch_size_block()[block_ptr];
                    cur_branch_size_reg1 = cur_branch_size_block()[block_ptr+1];
                    cur_branch_size_reg2 = cur_branch_size_block()[block_ptr+2];
                    cur_branch_size_reg3 = cur_branch_size_block()[block_ptr+3];
                    if( block_ptr >= BLOCK_NUM){
                        dx_ptr = MAX_PATH_PER_ROUND;
                        //Indicate nothing to do, end of dx
                        goto finfinddx;
                    }
                    branches_temp += cur_branch_size_reg0;
                    if (cur_branch_size_reg0 != 0 && cur_thread_id < branches_temp){
                        branches_temp -= cur_branch_size_reg0;
                        branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
                        goto hasdx;
                    }
                    block_ptr+=1;
                    if( block_ptr >= BLOCK_NUM){
                        dx_ptr = MAX_PATH_PER_ROUND;
                        //Indicate nothing to do, end of dx
                        goto finfinddx;
                    }
                    branches_temp += cur_branch_size_reg1;
                    if (cur_branch_size_reg1 != 0 && cur_thread_id < branches_temp){
                        branches_temp -= cur_branch_size_reg1;
                        branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
                        goto hasdx;
                    }
                    block_ptr+=1;
                    if( block_ptr >= BLOCK_NUM){
                        dx_ptr = MAX_PATH_PER_ROUND;
                        //Indicate nothing to do, end of dx
                        goto finfinddx;
                    }
                    branches_temp += cur_branch_size_reg2;
                    if (cur_branch_size_reg2 != 0 && cur_thread_id < branches_temp){
                        branches_temp -= cur_branch_size_reg2;
                        branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
                        goto hasdx;
                    }
                    block_ptr+=1;
                    if( block_ptr >= BLOCK_NUM){
                        dx_ptr = MAX_PATH_PER_ROUND;
                        //Indicate nothing to do, end of dx
                        goto finfinddx;
                    }
                    branches_temp += cur_branch_size_reg3;
                    if (cur_branch_size_reg3 != 0 && cur_thread_id < branches_temp){
                        branches_temp -= cur_branch_size_reg3;
                        branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
                        goto hasdx;
                    }
                    block_ptr+=1;
                }
                // if(true) exists only to open a scope for the hasdx: label target.
                if (true){
hasdx:
                    int initial_block_thread_ptr = block_thread_ptr;
                    if (initial_block_ptr == block_ptr){
                        //Found out block does not move
                        branches_temp = branch_sum_before_block_thread_ptr; //Take the old branch_size for block_thread (start off with offset block thread)
                        //block_thread_ptr remain unchanged...
                    }
                    else{
                        //New Block
                        block_thread_ptr = block_ptr * THREAD_PER_BLOCK; //Point to the 1st element of the block
                        //branches_temp remain unchanged because we are starting at 1st element...
                    }
                    //Find the correct block thread
                    bool is_found = false;
                    while(!is_found){
                        cur_branch_size_reg[0] = cur_branch_size_thread()[block_thread_ptr];
                        cur_branch_size_reg[1] = cur_branch_size_thread()[block_thread_ptr+1];
                        cur_branch_size_reg[2] = cur_branch_size_thread()[block_thread_ptr+2];
                        cur_branch_size_reg[3] = cur_branch_size_thread()[block_thread_ptr+3];
                        #pragma unroll
                        for (int i=0;i<4;i++){
                            branches_temp += cur_branch_size_reg[i];
                            if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){
                                branches_temp -= cur_branch_size_reg[i];
                                block_thread_ptr += i;
                                is_found = true;
                                break;
                            }
                        }
                        if(!is_found){
                            block_thread_ptr += 4;
                        }
                    }
                    branch_sum_before_block_thread_ptr_shared[cur_r] = branches_temp;
                    //Advance the dx position if needed (different block_thread location)
                    if (block_thread_ptr == initial_block_thread_ptr){
                        //Start at the same location
                        branches_temp = branch_sum_before_dx_ptr;
                    }
                    else{
                        dx_ptr = block_thread_ptr * MAX_SPACE_PER_THREAD;
                    }
                    //Find the correct dx position
                    is_found = false;
                    while(!is_found){
                        cur_branch_size_reg[0] = cur_branch_size()[dx_ptr];
                        cur_branch_size_reg[1] = cur_branch_size()[dx_ptr+1];
                        cur_branch_size_reg[2] = cur_branch_size()[dx_ptr+2];
                        cur_branch_size_reg[3] = cur_branch_size()[dx_ptr+3];
                        #pragma unroll
                        for (int i=0;i<4;i++){
                            //NOTE: no need to check out of bounds if correctly impleneted, it will be filtered out at block level..
                            branches_temp += cur_branch_size_reg[i];
                            if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){
                                branches_temp -= cur_branch_size_reg[i];
                                dx_ptr += i;
                                is_found = true;
                                break;
                            }
                        }
                        if(!is_found){
                            dx_ptr += 4;
                        }
                    }
                    branch_sum_before_dx_ptr = branches_temp;
                }
                else{
                    //Nothing here
                }
            }
finfinddx: ;
            if (dx_ptr < MAX_PATH_PER_ROUND){
                //If dx_ptr is within dx_num, [0-N)
                has_operation = true;
                float prob_thread = 1.0;
                int divide_factor = 1;
                unsigned int diff_freq_index; //0-16 only
                // remaining_value enumerates this path's branch combination
                // (mixed-radix digits over the active s-boxes' DDT row sizes).
                unsigned int remaining_value = cur_thread_id - branch_sum_before_dx_ptr ; //7^8 is less than 32 bit...
                unsigned char* cur_dx_temp = cur_dx() + ( 16 * dx_ptr ) ; //NOTE: Need to modify to fit datastruct of different cipher
                int* cur_sbox_index_ptr = cur_sbox_index() + (MAX_AS * dx_ptr);
                unsigned char cur_thread_partial_dy[17];
                cur_thread_partial_dy[16] = {0};
                memcpy(cur_thread_partial_dy,cur_dx_temp,16);
                int cur_sbox_index_temp[MAX_AS];
                memcpy(cur_sbox_index_temp, cur_sbox_index_ptr, sizeof(int) * MAX_AS);
                //Points to correct i_th branches of j_dx and so subs
                // Substitute each active s-box: decode this branch's output
                // difference from remaining_value and multiply its probability in.
                #pragma unroll
                for (int i = 0; i < MAX_AS; i++) {
                    unsigned char cur_val = cur_thread_partial_dy[cur_sbox_index_temp[i]];
                    diff_freq_index = (remaining_value / divide_factor) % SPN_DIFF::diff_table_size_shared[cur_val];
                    cur_thread_partial_dy[cur_sbox_index_temp[i]] = SPN_DIFF::diff_table_shared[cur_val][diff_freq_index]; //Assigning target val to partial_dy
                    prob_thread *= (SPN_DIFF::prob_table_shared[cur_val][diff_freq_index]);
                    divide_factor *= SPN_DIFF::diff_table_size_shared[cur_val];
                }
                prob_thread *= (*(cur_prob() + dx_ptr));
                //Do Permutate
                unsigned long long front_64 = 0;
                #pragma unroll
                for (int i = 0; i < 16; i++) {
                    if ( cur_thread_partial_dy[i] > 0) {
                        //Permutation LUTable
                        //TODO: require modify to feed in correct forward/backward
                        front_64 |= SPN_DIFF::perm_lookup_global_forward[i][cur_thread_partial_dy[i]];
                    }
                }
                #pragma unroll
                for (int i = 0; i < 16; i++) {
                    cur_thread_partial_dy[i] = (front_64 >> ((15 - i) * 4)) & 0xf;
                }
                if (cur_r != MAX_ROUND_FORWARD-1){
                    //Calculte sbox index and sbox number
                    int save_sbox_num = 0;
                    int save_branch_size = 1;
                    int save_sbox_index[16]; //Will point to non existance 32 array entry (see substitution below)
                    #pragma unroll
                    for (int i=0;i< 16;i++){
                        save_sbox_index[i] = 16;
                    }
                    #pragma unroll
                    for (int i = 0; i < 16; i++) {
                        if ((cur_thread_partial_dy[i] & 0xf) > 0) {
                            save_branch_size *= SPN_DIFF::diff_table_size_shared[cur_thread_partial_dy[i]];
                            save_sbox_index[save_sbox_num] = i;
                            save_sbox_num++;
                        }
                    }
                    //Pruning
                    // if(true){
                    if (save_sbox_num <= MAX_AS){
                        //If only next round AS <= 8
                        //MATSUI BOUND
                        float estimated_com_prob = SPN_DIFF::prob_per_round_remaining_shared[(MAX_ROUND_FORWARD - cur_r - 2)] * SPN_DIFF::prob_per_as_shared[save_sbox_num-1];
                        //NOTE: this bound is less tight when round entered is not zero..
                        if ((estimated_com_prob * prob_thread) >= SPN_DIFF::CLUSTER_PROB_BOUND_const) {
                        // if (true) {
                            memcpy(output_dx(increment),cur_thread_partial_dy,16);
                            *output_prob(increment) = prob_thread;
                            memcpy(output_sbox_index(increment), save_sbox_index, sizeof(int) * MAX_AS );
                            *output_branch_size(increment) = save_branch_size;
                            thread_branch_num_so_far += save_branch_size;
                            increment += 1;
                        }
                        // else{ *output_branch_size = 0;}
                    }
                    // else{ *output_branch_size = 0;}
                }
                //LAST ROUNDS... no permutation and straight to savings.
                else{
                    // Meeting point: record this intermediate difference in the
                    // MITM tables instead of expanding further.
                    int sbox_num=0;
                    int sbox_index[16]={0};
                    #pragma unroll
                    for (int i=0;i<16;i++){
                        if (cur_thread_partial_dy[i] !=0){
                            sbox_index[sbox_num] = i;
                            sbox_num+=1;
                        }
                    }
                    if (sbox_num <=3){
                        //Possible to store three only...
                        //Computing appropriate index
                        // Pack each active s-box as 9 bits (5-bit position | 4-bit
                        // nibble value), up to 3 s-boxes -> max 27-bit table index.
                        int index=0;
                        #pragma unroll
                        for (int i=0;i<sbox_num;i++){
                            index|= ( ( (sbox_index[i]&0b11111) | ( (cur_thread_partial_dy[sbox_index[i]]&0b1111) << 5) ) << (i * 9) );
                        }
                        atomicAdd(MITM_prob_interm_global+index,prob_thread);
                        atomicAdd(MITM_size_interm_global+index,1);
                    }
                }
            }
            cur_thread_id+=1;
        }
        cur_thread_id-=1;
        // cur_iter = cur_iter > 0?cur_iter-loop_limit:cur_iter; //Make sure >0 => -1, if 0 left it
        cur_iter = cur_iter - loop_limit;
        // NOTE(review): only thread 0 publishes its has_operation flag; after the
        // grid sync every thread adopts it. Thread 0 owns the lowest id range, so
        // it has work whenever anyone does — confirm this invariant holds.
        if(thread_id_default == 0){
            *(device_has_operation + (flip_0_1) ) = has_operation;
        }
        if (cur_r != MAX_ROUND_FORWARD-1){
            *output_branch_size_thread() += thread_branch_num_so_far; // so_far will be reset each sync thus adding like this is correct.
            atomicAdd(&SPN_DIFF::branch_size_block_shared[0], thread_branch_num_so_far);
            __syncthreads();
            if (threadIdx.x==0){
                // iter_shared[cur_r] = cur_iter;
                *output_branch_size_block() += SPN_DIFF::branch_size_block_shared[0];
                //Since the operation is once per output round, reset the stuff here
                atomicAdd( (output_branch_size_all()+!flip_0_1), SPN_DIFF::branch_size_block_shared[0]);
                SPN_DIFF::branch_size_block_shared[0] = 0;
            }
        }
        grid.sync(); //Wait for grid to synchronize before continue
        if (thread_id_default==0){
            output_branch_size_all()[flip_0_1] = 0;
        }
        grid.sync();
        has_operation = *(device_has_operation + (flip_0_1) );
        flip_0_1 = !flip_0_1;
        if(true){
            if (cur_r != MAX_ROUND_FORWARD-1 && has_operation){
                //Is not last round and has operation
                //Goes forwards
                iter_shared[cur_r] = cur_iter;
                dx_ptr_shared[cur_r] = dx_ptr;
                branch_sum_before_dx_ptr_shared[cur_r] = branch_sum_before_dx_ptr;
                thread_id_arr[cur_r] = cur_thread_id;
                cur_r+=1;
                dx_ptr = 0;
                branch_sum_before_dx_ptr = 0;
                branch_sum_before_block_thread_ptr_shared[cur_r] = 0;
                branch_sum_before_block_ptr_shared[cur_r] = 0;
                cur_iter = -1; //Signal the requirement of intiialization
                if (cur_r!=MAX_ROUND_FORWARD-1){
                    *output_branch_size_thread() = 0; //HAs to be reset because of tunneling
                    if (threadIdx.x==0){
                        *output_branch_size_block() = 0;
                    }
                }
            }
            else if(!has_operation || (cur_r == MAX_ROUND_FORWARD-1 && cur_iter == 0) ){
                //Has no operation => cur_iter == 0,
                //Goes backwards if last rounds or current rounds does not process anythings.
                do{
                    cur_r-=1;
                    if(cur_r == -1){
                        return; //NOTE: Completed computation, Base Case
                    }
                    cur_iter = iter_shared[cur_r];
                }while(cur_iter==0);
                cur_thread_id = thread_id_arr[cur_r] + 1;
                dx_ptr = dx_ptr_shared[cur_r];
                branch_sum_before_dx_ptr = branch_sum_before_dx_ptr_shared[cur_r];
                *output_branch_size_thread() = 0;
                if (threadIdx.x==0){
                    *output_branch_size_block() = 0;
                }
            }
            else{
                //Has operation and is last round and cur_iter != 0
                //Repeat last rounds.
                cur_thread_id += 1;
            }
        }
    }
};

//--- Head of the backward MITM kernel; its body continues below this chunk. ---
// Same structure as kernel_diff_mitm but walks the cipher in reverse using the
// *_reversed tables, and at its last round looks differences up in the MITM
// tables filled by the forward phase.
__launch_bounds__(THREAD_PER_BLOCK, 8)
__global__ void kernel_diff_mitm_backward(
    unsigned char* device_dx,
    int* device_sbox_index,
    float* device_prob,
    int* device_branch_size,
    unsigned long long* device_cluster_size_final,
    double* device_prob_final,
    int* device_last_dx_ptr,
    bool* device_has_operation,
    unsigned long long* device_branches_sum_before_dx,
    unsigned long long* device_branch_size_thread,
    unsigned long long* device_branch_size_block,
    unsigned long long* device_total_branch_size_block,
    float* MITM_prob_interm_global,
    unsigned long long* MITM_size_interm_global){
    // printf("\nInteger: %i, %i, block_size (threads per blocks) : %i",threadIdx.x, blockIdx.x,blockDim.x);
    grid_group grid = this_grid();
    // Stage the *reversed* DDT tables (decryption direction) plus the pruning
    // bounds into shared memory.
    if (threadIdx.x <32){
        if (threadIdx.x<16){
            SPN_DIFF::diff_table_size_shared[threadIdx.x] = SPN_DIFF::diff_table_size_global_reversed[threadIdx.x];
            for (int j = 0; j < 8; j++) {
                SPN_DIFF::diff_table_shared[threadIdx.x][j] = SPN_DIFF::diff_table_global_reversed[threadIdx.x][j];
                SPN_DIFF::prob_table_shared[threadIdx.x][j] = SPN_DIFF::prob_table_global_reversed[threadIdx.x][j];
            }
        }
        SPN_DIFF::prob_per_as_shared[threadIdx.x] = powf(CLUSTER_1AS_BEST_PROB, threadIdx.x+1);
        SPN_DIFF::prob_per_round_remaining_shared[threadIdx.x] = powf(CLUSTER_PROB_INDIV, threadIdx.x);
    }
    __syncthreads(); //wait for init to be finished, sync up all threads within a block... shared memory lies within each block.
//Computing target array index (id and output_range) //I - THREAD ID / total thread (including all block) - Used to coordinate splitting of tasks const int thread_id_default = (blockIdx.x * blockDim.x) + threadIdx.x; int cur_r = 0; int flip_0_1 = 0; long long cur_iter = -1; //This has to be signed int dx_ptr_shared[MAX_ROUND_BACKWARD]; unsigned long long branch_sum_before_dx_ptr_shared[MAX_ROUND_BACKWARD] = {0}; unsigned long long branch_sum_before_block_thread_ptr_shared[MAX_ROUND_BACKWARD] = {0}; unsigned long long branch_sum_before_block_ptr_shared[MAX_ROUND_BACKWARD] = {0}; long long iter_shared[MAX_ROUND_BACKWARD]; //Has to be signed //Preparation //Prepare array to store, each entry to next rounds will require the storing and restoring of these into local memory int thread_id_arr[MAX_ROUND_BACKWARD]; //Each round GRID_THREAD_SIZE is added to this int cur_thread_id; //Default value does not matter // int dx_ptr[MAX_ROUND_FORWARD] = {0}; //From 0- DX_num for that rounds int dx_ptr = 0; unsigned long long branch_sum_before_dx_ptr = 0; unsigned long long branch_sum_before_block_thread_ptr = 0; //From to 0-Block_num for that rounds // unsigned long long branch_sum_before_block_ptr = 0; //IO, need to be retarget after every rounds. 
//Output #define output_dx(x) (( device_dx + ( 16 * MAX_PATH_PER_ROUND * (cur_r+1)) + (16 * thread_id_default * MAX_SPACE_PER_THREAD) + (x * 16) )) #define output_sbox_index(x) (( device_sbox_index + ( MAX_AS * MAX_PATH_PER_ROUND * (cur_r+1) ) + (MAX_AS* thread_id_default * MAX_SPACE_PER_THREAD) + (x * MAX_AS) )) #define output_prob(x) (( device_prob + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x)) #define output_branch_size(x) (( device_branch_size + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x )) #define output_branch_size_thread() ((device_branch_size_thread + ( GRID_THREAD_SIZE * (cur_r+1) ) + thread_id_default)) #define output_branch_size_block() ((device_branch_size_block + (BLOCK_NUM * (cur_r+1)) + blockIdx.x)) #define output_branch_size_all() ((device_branch_size_block-2)) // unsigned long long* output_branch_size_all = (device_branch_size_block-2); //Input #define cur_dx() ( ( device_dx + (16 * MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_sbox_index() ( ( device_sbox_index + (MAX_AS * MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_prob() ( ( device_prob + (MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_branch_size() ( ( device_branch_size + (MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_branch_size_thread() ( ( device_branch_size_thread + (GRID_THREAD_SIZE * cur_r) ) ) #define cur_branch_size_block() ( (device_branch_size_block + (BLOCK_NUM * cur_r)) ) //Mainloop - while(true){ //Base case, cur_0 and reamining == 0 bool has_operation = false; int increment = 0; //Determine output save position.. 
unsigned long long thread_branch_num_so_far = 0; //Allow accumulaction of block_thread branch_num (atomic_add with each reset) if(cur_iter == -1){ // cur_iter = ceil(1.0 * output_branch_size_all()[flip_0_1]/GRID_THREAD_SIZE); cur_iter = output_branch_size_all()[flip_0_1]/GRID_THREAD_SIZE + (output_branch_size_all()[flip_0_1] % GRID_THREAD_SIZE != 0); cur_thread_id = thread_id_default * cur_iter; } //calculate block_thread_ptr.. initial int block_thread_ptr = dx_ptr/MAX_SPACE_PER_THREAD; int block_ptr = block_thread_ptr / THREAD_PER_BLOCK; //Find the correct DX with three layer int loop_limit = cur_iter<MAX_SPACE_PER_THREAD?cur_iter:MAX_SPACE_PER_THREAD; for (int i=0;i<loop_limit;i++){ if (dx_ptr < MAX_PATH_PER_ROUND){ branch_sum_before_block_thread_ptr = branch_sum_before_block_thread_ptr_shared[cur_r]; //Shortcut if (cur_thread_id < (cur_branch_size()[dx_ptr] + branch_sum_before_dx_ptr) && cur_thread_id < (cur_branch_size_thread()[block_thread_ptr] + branch_sum_before_block_thread_ptr) ){ goto finfinddx; } //Find the correct block unsigned long long cur_branch_size_reg0,cur_branch_size_reg1,cur_branch_size_reg2,cur_branch_size_reg3; unsigned long long cur_branch_size_reg[4]; unsigned long long branches_temp = branch_sum_before_block_ptr_shared[cur_r]; int initial_block_ptr = block_ptr; while(true){ cur_branch_size_reg0 = cur_branch_size_block()[block_ptr]; cur_branch_size_reg1 = cur_branch_size_block()[block_ptr+1]; cur_branch_size_reg2 = cur_branch_size_block()[block_ptr+2]; cur_branch_size_reg3 = cur_branch_size_block()[block_ptr+3]; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx goto finfinddx; } branches_temp += cur_branch_size_reg0; if (cur_branch_size_reg0 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg0; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx 
goto finfinddx; } branches_temp += cur_branch_size_reg1; if (cur_branch_size_reg1 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg1; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx goto finfinddx; } branches_temp += cur_branch_size_reg2; if (cur_branch_size_reg2 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg2; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx goto finfinddx; } branches_temp += cur_branch_size_reg3; if (cur_branch_size_reg3 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg3; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; } if (true){ hasdx: int initial_block_thread_ptr = block_thread_ptr; if (initial_block_ptr == block_ptr){ //Found out block does not move branches_temp = branch_sum_before_block_thread_ptr; //Take the old branch_size for block_thread (start off with offset block thread) //block_thread_ptr remain unchanged... } else{ //New Block block_thread_ptr = block_ptr * THREAD_PER_BLOCK; //Point to the 1st element of the block //branches_temp remain unchanged because we are starting at 1st element... 
} //Find the correct block thread bool is_found = false; while(!is_found){ cur_branch_size_reg[0] = cur_branch_size_thread()[block_thread_ptr]; cur_branch_size_reg[1] = cur_branch_size_thread()[block_thread_ptr+1]; cur_branch_size_reg[2] = cur_branch_size_thread()[block_thread_ptr+2]; cur_branch_size_reg[3] = cur_branch_size_thread()[block_thread_ptr+3]; #pragma unroll for (int i=0;i<4;i++){ branches_temp += cur_branch_size_reg[i]; if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg[i]; block_thread_ptr += i; is_found = true; break; } } if(!is_found){ block_thread_ptr += 4; } } branch_sum_before_block_thread_ptr_shared[cur_r] = branches_temp; //Advance the dx position if needed (different block_thread location) if (block_thread_ptr == initial_block_thread_ptr){ //Start at the same location branches_temp = branch_sum_before_dx_ptr; } else{ dx_ptr = block_thread_ptr * MAX_SPACE_PER_THREAD; } //Find the correct dx position is_found = false; while(!is_found){ cur_branch_size_reg[0] = cur_branch_size()[dx_ptr]; cur_branch_size_reg[1] = cur_branch_size()[dx_ptr+1]; cur_branch_size_reg[2] = cur_branch_size()[dx_ptr+2]; cur_branch_size_reg[3] = cur_branch_size()[dx_ptr+3]; #pragma unroll for (int i=0;i<4;i++){ //NOTE: no need to check out of bounds if correctly impleneted, it will be filtered out at block level.. branches_temp += cur_branch_size_reg[i]; if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg[i]; dx_ptr += i; is_found = true; break; } } if(!is_found){ dx_ptr += 4; } } branch_sum_before_dx_ptr = branches_temp; } else{ //Nothing here } } finfinddx: ; if (dx_ptr < MAX_PATH_PER_ROUND){ //If dx_ptr is within dx_num, [0-N) has_operation = true; float prob_thread = 1.0; int divide_factor = 1; unsigned int diff_freq_index; //0-16 only unsigned int remaining_value = cur_thread_id - branch_sum_before_dx_ptr ; //7^8 is less than 32 bit... 
unsigned char* cur_dx_temp = cur_dx() + ( 16 * dx_ptr ) ; //NOTE: Need to modify to fit datastruct of different cipher int* cur_sbox_index_ptr = cur_sbox_index() + (MAX_AS * dx_ptr); unsigned char cur_thread_partial_dy[17]; cur_thread_partial_dy[16] = {0}; memcpy(cur_thread_partial_dy,cur_dx_temp,16); int cur_sbox_index_temp[MAX_AS]; memcpy(cur_sbox_index_temp, cur_sbox_index_ptr, sizeof(int) * MAX_AS); //Points to correct i_th branches of j_dx and so subs #pragma unroll for (int i = 0; i < MAX_AS; i++) { unsigned char cur_val = cur_thread_partial_dy[cur_sbox_index_temp[i]]; diff_freq_index = (remaining_value / divide_factor) % SPN_DIFF::diff_table_size_shared[cur_val]; cur_thread_partial_dy[cur_sbox_index_temp[i]] = SPN_DIFF::diff_table_shared[cur_val][diff_freq_index]; //Assigning target val to partial_dy prob_thread *= (SPN_DIFF::prob_table_shared[cur_val][diff_freq_index]); divide_factor *= SPN_DIFF::diff_table_size_shared[cur_val]; } prob_thread *= (*(cur_prob() + dx_ptr)); if (cur_r+1 != MAX_ROUND_BACKWARD){ //Do Permutate unsigned long long front_64 = 0; #pragma unroll for (int i = 0; i < 16; i++) { if ( cur_thread_partial_dy[i] > 0) { //Permutation LUTable front_64 |= SPN_DIFF::perm_lookup_global_reversed[i][cur_thread_partial_dy[i]]; } } #pragma unroll for (int i = 0; i < 16; i++) { cur_thread_partial_dy[i] = (front_64 >> ((15 - i) * 4)) & 0xf; } //Calculte sbox index and sbox number int save_sbox_num = 0; int save_branch_size = 1; int save_sbox_index[16]; //Will point to non existance 32 array entry (see substitution below) #pragma unroll for (int i=0;i< 16;i++){ save_sbox_index[i] = 16; } #pragma unroll for (int i = 0; i < 16; i++) { if ((cur_thread_partial_dy[i] & 0xf) > 0) { save_branch_size *= SPN_DIFF::diff_table_size_shared[cur_thread_partial_dy[i]]; save_sbox_index[save_sbox_num] = i; save_sbox_num++; } } //Pruning // if(true){ if (save_sbox_num <= MAX_AS){ //If only next round AS <= 8 //MATSUI BOUND float estimated_com_prob = 
SPN_DIFF::prob_per_round_remaining_shared[(MAX_ROUND_BACKWARD - cur_r - 2)] * SPN_DIFF::prob_per_as_shared[save_sbox_num-1]; //NOTE: this bound is less tight when round entered is not zero.. if ((estimated_com_prob * prob_thread) >= SPN_DIFF::CLUSTER_PROB_BOUND_const) { memcpy(output_dx(increment),cur_thread_partial_dy,16); *output_prob(increment) = prob_thread; memcpy(output_sbox_index(increment), save_sbox_index, sizeof(int) * MAX_AS ); *output_branch_size(increment) = save_branch_size; thread_branch_num_so_far += save_branch_size; increment += 1; } // else{ *output_branch_size = 0;} } // else{ *output_branch_size = 0;} } //LAST ROUNDS... no permutation and straight to savings. else{ int sbox_num=0; int sbox_index[16]={0}; #pragma unroll for (int i=0;i<16;i++){ if (cur_thread_partial_dy[i] !=0){ sbox_index[sbox_num] = i; sbox_num+=1; } } if (sbox_num <=3){ //Possible to store three only... //Computing appropriate index int index=0; #pragma unroll for (int i=0;i<sbox_num;i++){ index|= ( ( (sbox_index[i]&0b11111) | ( (cur_thread_partial_dy[sbox_index[i]]&0b1111) << 5) ) << (i * 9) ); } unsigned long long target_size = MITM_size_interm_global[index]; if(target_size > 0){ //Exist connection double target_prob = ( (1.0 * prob_thread) * MITM_prob_interm_global[index]); //DEBUG: enable back //Add to collection device_prob_final[thread_id_default] += target_prob; device_cluster_size_final[thread_id_default] += target_size; } } } } cur_thread_id+=1; } cur_thread_id-=1; // cur_iter = cur_iter > 0?cur_iter-loop_limit:cur_iter; //Make sure >0 => -1, if 0 left it cur_iter = cur_iter - loop_limit; if(thread_id_default == 0){ *(device_has_operation + (flip_0_1) ) = has_operation; } if (cur_r != MAX_ROUND_BACKWARD-1){ *output_branch_size_thread() += thread_branch_num_so_far; // so_far will be reset each sync thus adding like this is correct. 
atomicAdd(&SPN_DIFF::branch_size_block_shared[0], thread_branch_num_so_far); __syncthreads(); if (threadIdx.x==0){ // iter_shared[cur_r] = cur_iter; *output_branch_size_block() += SPN_DIFF::branch_size_block_shared[0]; //Since the operation is once per output round, reset the stuff here atomicAdd( (output_branch_size_all()+!flip_0_1), SPN_DIFF::branch_size_block_shared[0]); SPN_DIFF::branch_size_block_shared[0] = 0; } } grid.sync(); //Wait for grid to synchronize before continue if (thread_id_default==0){ output_branch_size_all()[flip_0_1] = 0; } grid.sync(); has_operation = *(device_has_operation + (flip_0_1) ); flip_0_1 = !flip_0_1; if(true){ if (cur_r != MAX_ROUND_BACKWARD-1 && has_operation){ //Is not last round and has operation //Goes forwards iter_shared[cur_r] = cur_iter; dx_ptr_shared[cur_r] = dx_ptr; branch_sum_before_dx_ptr_shared[cur_r] = branch_sum_before_dx_ptr; thread_id_arr[cur_r] = cur_thread_id; cur_r+=1; // cur_thread_id = thread_id_default; //NOTE: does not matter dx_ptr = 0; branch_sum_before_dx_ptr = 0; branch_sum_before_block_thread_ptr_shared[cur_r] = 0; branch_sum_before_block_ptr_shared[cur_r] = 0; cur_iter = -1; //Signal the requirement of intiialization if (cur_r!=MAX_ROUND_BACKWARD-1){ *output_branch_size_thread() = 0; //HAs to be reset because of tunneling if (threadIdx.x==0){ *output_branch_size_block() = 0; } } } else if(!has_operation || (cur_r == MAX_ROUND_BACKWARD-1 && cur_iter == 0) ){ //Has no operation => cur_iter == 0, //Goes backwards if last rounds or current rounds does not process anythings. 
do{ cur_r-=1; if(cur_r == -1){ return; //NOTE: Completed computation, Base Case } cur_iter = iter_shared[cur_r]; }while(cur_iter==0); cur_thread_id = thread_id_arr[cur_r] + 1; dx_ptr = dx_ptr_shared[cur_r]; branch_sum_before_dx_ptr = branch_sum_before_dx_ptr_shared[cur_r]; *output_branch_size_thread() = 0; if (threadIdx.x==0){ *output_branch_size_block() = 0; } } else{ //Has operation and is last round and cur_iter != 0 //Repeat last rounds. cur_thread_id += 1; } } } }; //Kernel Launch preparation from here //NOTE: Branch Size is assumed to be zero... void GPU_Kenerl_t::kernel_compute(int branch_size, unsigned char* dx, unsigned char* dy, int* sbox_index, int* sbox_num, int* nb_size, float* cur_prob, int cur_r, int target_round){ hipError_t cudaStatus; if (branch_size >1){ printf("\nInitial DX > 1 size is not supported.."); return; } cudaStatus = hipMemcpyToSymbol(SPN_DIFF::final_dy_constant, dy, sizeof(unsigned char)*16); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpyToSymbol final_dy_constant failed!"); } cudaStatus = hipMemcpy(device_dx, dx, sizeof(unsigned char) * 16 * branch_size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy dx failed!"); } cudaStatus = hipMemcpy(device_sbox_index, sbox_index, sizeof(int) * MAX_AS * branch_size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy sbox_index failed!"); } // cudaStatus = hipMemcpy(device_sbox_num, sbox_num, sizeof(int) * branch_size, hipMemcpyHostToDevice); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMemcpy sbox_num failed!"); // } cudaStatus = hipMemcpy(device_prob, cur_prob, sizeof(float) * branch_size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy cur_prob failed!"); } cudaStatus = hipMemcpy(device_branch_size, nb_size, sizeof(int) * branch_size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy nb_size failed!"); } unsigned long long* 
nb_size_longlong = new unsigned long long(); *nb_size_longlong = *nb_size; cudaStatus = hipMemcpy(device_branch_size_block, nb_size_longlong, sizeof(unsigned long long), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy nb_size failed!"); } cudaStatus = hipMemcpy(device_branch_size_thread, nb_size_longlong, sizeof(unsigned long long), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy cur_prob failed!"); } unsigned long long* device_branch_size_block2 = (device_branch_size_block+2); cudaStatus = hipMemcpy(device_branch_size_block2, nb_size_longlong, sizeof(unsigned long long), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy nb_size failed!"); } // Starting Kernel // int *round_to_process = new int(); // *round_to_process = target_round - cur_r; int *round_offset = new int(); *round_offset = cur_r; // int *branch_size_ptr = new int(); // *branch_size_ptr = branch_size; // int round_to_process = target_round - cur_r; // int round_offset = cur_r; bool is_MITM =true; if (is_MITM){ void** args = new void*[14]; args[0] = &device_dx; args[1] = &device_sbox_index; args[2] = &device_prob; args[3] = &device_branch_size; args[4] = &device_cluster_size_final; args[5] = &device_prob_final; args[6] = &device_last_dx_ptr; args[7] = &device_has_operation; args[8] = &device_branches_sum_before_dx; args[9] = &device_branch_size_thread; args[10] = &device_branch_size_block2; args[11] = &device_total_branch_size_block; args[12] = &MITM_prob_interm_global; args[13] = &MITM_size_interm_global; dim3 dimGrid(BLOCK_NUM, 1, 1); dim3 dimBlock(THREAD_PER_BLOCK, 1, 1); std::cout << "\nTransfered constant matsui bound from host to device"; cudaStatus = hipMemcpyToSymbol(SPN_DIFF::CLUSTER_PROB_BOUND_const, &CLUSTER_PROB_BOUND_FORWARD, sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpyToSymbol CLUSTER_PROB_BOUND_FORWARD failed!"); getchar(); exit(-1); } cudaStatus = 
hipLaunchCooperativeKernel((void*) kernel_diff_mitm, dimGrid, dimBlock, args); if (cudaStatus != hipSuccess) { hipError_t err = hipGetLastError(); fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, hipGetErrorName(err), hipGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } // cudaStatus = hipDeviceSynchronize(); // if (cudaStatus != hipSuccess) { // hipError_t err = hipGetLastError(); // fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, hipGetErrorName(err), hipGetErrorString(err)); // std::cout << "\nExiting the program manually..."; // getchar(); // exit(-1); // } //Backwards cudaStatus = hipMemset(device_branch_size_block+1, 0, sizeof(unsigned long long)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_branch_size_block+1 during backwards failed!"); } cudaStatus = hipMemset(device_branch_size_block2+BLOCK_NUM, 0, sizeof(unsigned long long)*BLOCK_NUM ); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_branch_size_block2 during backwards failed!"); } cudaStatus = hipMemset(device_branch_size_thread + GRID_THREAD_SIZE, 0, sizeof(unsigned long long)*GRID_THREAD_SIZE); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset device_branch_size_thread during backwards failed!"); } //Transfer DY part std::cout << "\nTransfered constant matsui bound from host to device"; cudaStatus = hipMemcpyToSymbol(SPN_DIFF::CLUSTER_PROB_BOUND_const, &CLUSTER_PROB_BOUND_BACKWARD, sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpyToSymbol CLUSTER_PROB_BOUND_BACKWARD failed!"); getchar(); exit(-1); } cudaStatus = hipMemcpy(device_dx, dy, sizeof(unsigned char) * 16 * branch_size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy dx failed!"); } *nb_size = 1; int temp_index_ptr = 0; for (int i=0;i<16;i++){ sbox_index[i] = 16; } for (int i=0;i<16;i++){ // sbox_index[i] = dy[i] > 0? 
i : 0; if(dy[i]>0){ sbox_index[temp_index_ptr] = i; *(nb_size) = *(nb_size) * (SPN_DIFF::diff_table_size_host_reversed[dy[i]]); temp_index_ptr+=1; } } *cur_prob = 1.0f; cudaStatus = hipMemcpy(device_sbox_index, sbox_index, sizeof(int) * MAX_AS * branch_size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy sbox_index failed!"); } cudaStatus = hipMemcpy(device_prob, cur_prob, sizeof(float) * branch_size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy cur_prob failed!"); } cudaStatus = hipMemcpy(device_branch_size, nb_size, sizeof(int) * branch_size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy nb_size failed!"); } *nb_size_longlong = *nb_size; //Because nb_size is int, cast to long long in this case. cudaStatus = hipMemcpy(device_branch_size_block, nb_size_longlong, sizeof(unsigned long long), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy nb_size failed!"); } cudaStatus = hipMemcpy(device_branch_size_thread, nb_size_longlong, sizeof(unsigned long long), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy cur_prob failed!"); } // unsigned long long* device_branch_size_block2 = (device_branch_size_block+2); cudaStatus = hipMemcpy(device_branch_size_block2, nb_size_longlong, sizeof(unsigned long long), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy nb_size failed!"); } cudaStatus = hipLaunchCooperativeKernel((void*) kernel_diff_mitm_backward, dimGrid, dimBlock, args); if (cudaStatus != hipSuccess) { hipError_t err = hipGetLastError(); fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, hipGetErrorName(err), hipGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { hipError_t err = hipGetLastError(); fprintf(stderr, "\nError Code %d : %s: %s .", 
cudaStatus, hipGetErrorName(err), hipGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } delete args; } else{ void** args = new void*[12]; // args[0] = &round_offset; // args[1] = &round_to_process; // args[2] = &branch_size; // int* device_sbox_index2 = device_sbox_index+1; // bool* device_has_operation2 = device_has_operation + 2; // args[0] = round_offset; // args[0] = round_to_process; // args[2] = branch_size_ptr; args[0] = &device_dx; args[1] = &device_sbox_index; args[2] = &device_prob; args[3] = &device_branch_size; args[4] = &device_cluster_size_final; args[5] = &device_prob_final; args[6] = &device_last_dx_ptr; args[7] = &device_has_operation; args[8] = &device_branches_sum_before_dx; args[9] = &device_branch_size_thread; args[10] = &device_branch_size_block2; args[11] = &device_total_branch_size_block; dim3 dimGrid(BLOCK_NUM, 1, 1); dim3 dimBlock(THREAD_PER_BLOCK, 1, 1); std::cout << "\nTransfered constant matsui bound from host to device"; cudaStatus = hipMemcpyToSymbol(SPN_DIFF::CLUSTER_PROB_BOUND_const, &CLUSTER_PROB_BOUND, sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpyToSymbol CLUSTER_PROB_BOUND_const failed!"); getchar(); exit(-1); } cudaStatus = hipLaunchCooperativeKernel((void*) kernel_diff, dimGrid, dimBlock, args); if (cudaStatus != hipSuccess) { hipError_t err = hipGetLastError(); fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, hipGetErrorName(err), hipGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { hipError_t err = hipGetLastError(); fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, hipGetErrorName(err), hipGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } delete args; } //hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, my_kernel, numThreads, 0); // initialize, then launch // 
hipLaunchCooperativeKernel((void*)my_kernel, deviceProp.multiProcessorCount*numBlocksPerSm, numThreads, args); // dim3 dimGrid(numSms * numBlocksPerSm, 1, 1); // hipLaunchCooperativeKernel( // const T *func, // dim3 gridDim, // dim3 blockDim, // void **args, // size_t sharedMem = 0, // hipStream_t stream = 0 // ) } void GPU_Kenerl_t::kernel_reduction(double& gpu_prob, long long& gpu_size){ long long size_arr[GRID_THREAD_SIZE]; double prob_arr[GRID_THREAD_SIZE]; const int size = GRID_THREAD_SIZE; auto cudaStatus = hipMemcpy(size_arr, device_cluster_size_final, sizeof(unsigned long long)* size, hipMemcpyDeviceToHost); #ifdef CUDA_ERROR_PRINT if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy (device_cluster_size_final) failed!");\ getchar(); } #endif cudaStatus = hipMemcpy(prob_arr, device_prob_final, sizeof(double)* size, hipMemcpyDeviceToHost); #ifdef CUDA_ERROR_PRINT if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy (device_prob_final) failed!");\ getchar(); } #endif printf("\nBefore Reduction \t GPU_Cluster_size : %lld\t GPU_Prob : %f",gpu_size, gpu_prob); for (int i=0;i< size; i++ ){ gpu_size += size_arr[i]; gpu_prob += prob_arr[i]; } printf("\nAfter Reduction \t GPU_Cluster_size : %lld\t GPU_Prob : %f",gpu_size, gpu_prob); } //Called Once (1) @ program entry. void SPN_DIFF::init(){ int pi=0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); std::cout << "\nGPU Info :"; std::cout << "\nSM numbers: " << deviceProp.multiProcessorCount; hipDeviceGetAttribute(&pi, hipDeviceAttributeCooperativeLaunch, 0); std::cout << "\nSupport Cooperative Groups (Grid): " << (pi==1? 
" True":" FALSE"); int numBlocksPerSm; hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, kernel_diff, THREAD_PER_BLOCK, 0); std::cout << "\nMax Blocks Per SM : " << numBlocksPerSm; if (deviceProp.multiProcessorCount != 9){ std::cout << "\nPress enter key to conitnue"; std::cout << "\n----------\n"; getchar(); } if (pi==0){ std::cout << "\nCooperative Groups not supported on target GPU"; exit(-1); } // std::cout <<"\nInit Trifle Reverse Differential Table:{\n"; // std::cout <<"\nPRESENT Permutation:{\n"; for (int i = 0; i < 64; i++) { // if (i%16==0){ // std:: cerr <<"\n"; // } SPN_DIFF::perm_host[i] = (i / 4) + ((i % 4) * 16); // std::cout << (int) SPN_DIFF::perm_host[i]<< ","; } // std::cout << "\n}\n" ; // std::cout <<"\nPresent Permutation Reversed:{\n"; for (int i=0;i<64;i++){ SPN_DIFF::perm_host_reversed[SPN_DIFF::perm_host[i]] = i; } // for (int i=0;i<64;i++){ // std::cout << (int) SPN_DIFF::perm_host_reversed[i]<< ","; // } // std::cout << "}\n" ; //-- // std::cout <<"\n4bit Permutation LUTable * 32 (Size is 32*16*16 is 8192Bytes) :{\n"; for (int sbox_pos=0;sbox_pos<16;sbox_pos++){ for (int sbox_val=0;sbox_val<16;sbox_val++){ unsigned char dx[16] = {0}; dx[sbox_pos] = sbox_val; //Do permutation unsigned long long front_64 = 0, front_64_reversed=0; for (int i = 0; i < 16; i++) { if (dx[i] > 0) { for (int j = 0; j < 4; j++) { //Actually filtered_bit unsigned long long filtered_word = ((dx[i] & (0x1 << j)) >> j) & 0x1; if (filtered_word == 0) continue; //no point continue if zero, go to next elements int bit_pos = (SPN_DIFF::perm_host[((15 - i) * 4) + j]); int bit_pos_reversed = (SPN_DIFF::perm_host_reversed[((15 - i) * 4) + j]); front_64 |= (filtered_word << bit_pos); front_64_reversed |= (filtered_word << bit_pos_reversed); } } } //Front 64, 0-15, Back64 - 16-31 SPN_DIFF::perm_lookup_host[sbox_pos][sbox_val]=front_64; SPN_DIFF::perm_lookup_host_reversed[sbox_pos][sbox_val]=front_64_reversed; } } // std::cout << "}\n" ; };
f3ae95da6b0ef6f6336b17af1182973045537350.cu
#include "spn_diff_kernel.cuh"
#include <iostream>
#include <cstring>
#include <cooperative_groups.h>
using namespace cooperative_groups;

namespace SPN_DIFF{
/*
* BC specific permutation and DTT
*/
//Contains configuration (macro / c++ global variable) intended to be used across different translation unit
// __shared__ unsigned long long perm_lookup_shared[MAX_SBOX][16][2];
// Per-(sbox position, nibble value) 64-bit permutation masks; filled from the
// host tables below at construction time (cudaMemcpyToSymbol).
__device__ unsigned long long perm_lookup_global_forward[MAX_SBOX][16];
__device__ unsigned long long perm_lookup_global_reversed[MAX_SBOX][16];
// __device__ unsigned long long perm_lookup_device[MAX_SBOX][16][2];
// Host-side bit-position permutation map and its inverse (built in init()).
unsigned char perm_host[BLOCK_SIZE_BIT];
unsigned char perm_host_reversed[BLOCK_SIZE_BIT];
unsigned long long perm_lookup_host[MAX_SBOX][16]; //8192 bytes, 8KB, one SM can have 49KB should be fine
unsigned long long perm_lookup_host_reversed[MAX_SBOX][16];

// Differential distribution data for the 4-bit sbox: row = input difference,
// columns = reachable output differences (row length in diff_table_size_*).
__shared__ unsigned int diff_table_shared[16][8]; //NOTE: init in kernel by 1st thread of the block.
__device__ unsigned int diff_table_global[][8] = {
{0x0 ,0x0 ,0x0 ,0x0 ,0x0 ,0x0 ,0x0 ,0x0},
{0x3 ,0x7 ,0x9 ,0xd ,0x0 ,0x0 ,0x0 ,0x0},
{0x5 ,0x3 ,0x6 ,0xa ,0xc ,0xd ,0xe ,0x0},
{0x6 ,0x1 ,0x3 ,0x4 ,0x7 ,0xa ,0xb ,0x0},
{0x5 ,0x6 ,0x7 ,0x9 ,0xa ,0xc ,0xe ,0x0},
{0xc ,0x1 ,0x4 ,0x9 ,0xa ,0xb ,0xd ,0x0},
{0xb ,0xf ,0x2 ,0x6 ,0x8 ,0xc ,0x0 ,0x0},
{0x1 ,0xf ,0x2 ,0x6 ,0x8 ,0xc ,0x0 ,0x0},
{0xb ,0xf ,0x3 ,0x7 ,0x9 ,0xd ,0x0 ,0x0},
{0x4 ,0xe ,0x2 ,0x6 ,0x8 ,0xc ,0x0 ,0x0},
{0x5 ,0x2 ,0x3 ,0x8 ,0xa ,0xd ,0xe ,0x0},
{0x8 ,0x1 ,0x4 ,0x9 ,0xa ,0xb ,0xd ,0x0},
{0x5 ,0x2 ,0x7 ,0x8 ,0x9 ,0xa ,0xe ,0x0},
{0x2 ,0x1 ,0x3 ,0x4 ,0x7 ,0xa ,0xb ,0x0},
{0x2 ,0x3 ,0x6 ,0x7 ,0x8 ,0x9 ,0xc ,0xd},
{0x1 ,0x4 ,0xe ,0xf ,0x0 ,0x0 ,0x0 ,0x0},
};
// Reversed DDT (output difference -> possible input differences), used by the
// backward search direction.
__device__ unsigned int diff_table_global_reversed[][8] = {
{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
{0x7, 0xf, 0x3, 0x5, 0xb, 0xd, 0x0, 0x0},
{0xd, 0xe, 0xa, 0xc, 0x6, 0x7, 0x9, 0x0},
{0x1, 0x2, 0xe, 0x3, 0x8, 0xa, 0xd, 0x0},
{0x9, 0xf, 0x5, 0xb, 0x3, 0xd, 0x0, 0x0},
{0x2, 0x4, 0xa, 0xc, 0x0, 0x0, 0x0, 0x0},
{0x3, 0x4, 0x2, 0xe, 0x6, 0x7, 0x9, 0x0},
{0x1, 0x4, 0xc, 0x8, 0xe, 0x3, 0xd, 0x0},
{0xb, 0xa, 0xc, 0x6, 0x7, 0x9, 0xe, 0x0},
{0x1, 0x4, 0x5, 0xb, 0x8, 0xc, 0xe, 0x0},
{0x2, 0x4, 0x5, 0xa, 0xb, 0x3, 0xc, 0xd},
{0x6, 0x8, 0x5, 0xb, 0x3, 0xd, 0x0, 0x0},
{0x5, 0x2, 0x4, 0x6, 0x7, 0x9, 0xe, 0x0},
{0x1, 0x2, 0x8, 0xa, 0x5, 0xb, 0xe, 0x0},
{0x9, 0xf, 0x2, 0x4, 0xa, 0xc, 0x0, 0x0},
{0x6, 0x7, 0x8, 0xf, 0x0, 0x0, 0x0, 0x0}
};
// Transition probabilities parallel to the diff tables above (same indexing).
__shared__ float prob_table_shared[16][8]; //NOTE: init in kernel by 1st thread of the block.
__device__ float prob_table_global[16][8]={
{1, 0, 0, 0, 0, 0, 0, 0},
{0.25f , 0.25f , 0.25f , 0.25f , 0, 0, 0, 0},
{0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0},
{0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0},
{0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0},
{0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0},
{0.25f , 0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0, 0},
{0.25f , 0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0, 0},
{0.25f , 0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0, 0},
{0.25f , 0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0, 0},
{0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0},
{0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0},
{0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0},
{0.25f , 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0},
{0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f, 0.125f},
{0.25f , 0.25f , 0.25f , 0.25f , 0, 0, 0, 0}
};
__device__ float prob_table_global_reversed[16][8]={
{1, 0, 0, 0, 0, 0, 0, 0},
{0.25, 0.25, 0.125, 0.125, 0.125, 0.125, 0, 0},
{0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0},
{0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0},
{0.25, 0.25, 0.125, 0.125, 0.125, 0.125, 0, 0},
{0.25, 0.25, 0.25, 0.25, 0, 0, 0, 0},
{0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0},
{0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0},
{0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0},
{0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0},
{0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125},
{0.25, 0.25, 0.125, 0.125, 0.125, 0.125, 0, 0},
{0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0},
{0.25, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0},
{0.25, 0.25, 0.125, 0.125, 0.125, 0.125, 0, 0},
{0.25, 0.25, 0.25, 0.25, 0, 0, 0, 0}
};
// Number of valid entries per row of the diff/prob tables above.
__shared__ unsigned int diff_table_size_shared[16];
__device__ unsigned int diff_table_size_global[16] = {1 , 4 , 7 , 7 , 7 , 7 , 6 , 6 , 6 , 6 , 7 , 7 , 7 , 7 , 8 , 4};
__device__ unsigned int diff_table_size_global_reversed[16] = {1, 6, 7, 7, 6, 4, 7, 7, 7, 7, 8, 6, 7, 7, 6, 4};
unsigned int diff_table_size_host[16]= {1 , 4 , 7 , 7 , 7 , 7 , 6 , 6 , 6 , 6 , 7 , 7 , 7 , 7 , 8 , 4 };
unsigned int diff_table_size_host_reversed[16] = {1, 6, 7, 7, 6, 4, 7, 7, 7, 7, 8, 6, 7, 7, 6, 4};
// Per-block accumulator and precomputed pruning bounds (filled in-kernel).
__shared__ unsigned long long branch_size_block_shared[1];
__shared__ float prob_per_as_shared[32]; //MAX_AS
__shared__ float prob_per_round_remaining_shared[32]; //MAX_ROUND_FORWARD
/*
* DX and DY changes
*/
//Constant memory because it is accessed by the same warp @ the same addresses. (broadcasting) else request will be serialized
__constant__ unsigned char final_dy_constant[16] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6, 0x0, 0x6, 0x0 };
unsigned char final_dy_host[16] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6, 0x0, 0x6, 0x0 };
unsigned char ref_dx_host[16] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 };
// Pruning threshold; overwritten from the host before each kernel launch.
__constant__ float CLUSTER_PROB_BOUND_const = 0;
};

// Constructor: selects the device, uploads the permutation lookup tables and
// allocates all device-side working buffers (see continuation below).
GPU_Kenerl_t::GPU_Kenerl_t(int gpu_id, bool is_MITM_used){
//Create its own stream..
cudaStreamCreate( &(this->stream_obj) );
//DEBUG: Set prinf limit 10MB
// cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 10000000);
//Called @ different GPU threads (each with its own cpu thread)
auto cudaStatus = cudaSetDevice(gpu_id);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! 
CudaDeviceNumber :%d", gpu_id );
goto Error;
}
// Upload the host-built permutation lookup tables to device globals.
std::cout << "\nTransfered perm_LUhost from host to device";
cudaStatus = cudaMemcpyToSymbol(SPN_DIFF::perm_lookup_global_forward, SPN_DIFF::perm_lookup_host, sizeof(unsigned long long)*16*16);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy perm_lookup_global_forward failed!");
goto Error;
}
std::cout << "\nTransfered perm_LUhost Reversed from host to device";
cudaStatus = cudaMemcpyToSymbol(SPN_DIFF::perm_lookup_global_reversed, SPN_DIFF::perm_lookup_host_reversed, sizeof(unsigned long long)*16*16);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy perm_lookup_global_reversed failed!");
goto Error;
}
//Allocate Memory HERE
//Input Allocation
// Per-round path storage: 16-byte differences, sbox indices, probabilities.
cudaStatus = cudaMalloc((void**)& device_dx, sizeof(unsigned char)* 16 * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_dx @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_dx, 0, sizeof(unsigned char)* 16 * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_dx failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& device_sbox_index, sizeof(int)* MAX_AS * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_sbox_index @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_sbox_index, 0, sizeof(int) * MAX_AS * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_sbox_index failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& device_sbox_num, sizeof(int) * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_sbox_num @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_sbox_num, 0, sizeof(int) * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_sbox_num failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& device_prob, sizeof(float) * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_prob @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_prob, 0, sizeof(float) * MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_prob failed!");
goto Error;
}
// Branch-count bookkeeping (per path, per thread, per block).
cudaStatus = cudaMalloc((void**)& device_branch_size, sizeof(int) * ( (MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD) + 4) ); // + 4 to accomodate 4 loading @ same time
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_branch_size @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_branch_size, 0, sizeof(int) * ( (MAX_PATH_PER_ROUND * MAX_ROUND_FORWARD) + 4) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_branch_size failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& device_branch_size_thread, sizeof(unsigned long long) * ( (GRID_THREAD_SIZE * MAX_ROUND_FORWARD) + 4) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_branch_size_thread @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_branch_size_thread, 0, sizeof(unsigned long long) * ( (GRID_THREAD_SIZE * MAX_ROUND_FORWARD) + 4) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_branch_size_thread failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& device_branch_size_block, sizeof(unsigned long long) * ( (BLOCK_NUM * MAX_ROUND_FORWARD) + 4 + 2) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_branch_size_block @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_branch_size_block, 0, sizeof(unsigned long long) * ( (BLOCK_NUM * MAX_ROUND_FORWARD) + 4 + 2) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_branch_size_block failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& device_total_branch_size_block, sizeof(unsigned long long) * ( ((BLOCK_NUM * MAX_ROUND_FORWARD) + 1) * 2) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_branch_size_block @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_total_branch_size_block, 0, sizeof(unsigned long long) * ( ((BLOCK_NUM * MAX_ROUND_FORWARD) + 1) * 2) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_branch_size_block failed!");
goto Error;
}
//Final (Needs to be reduced) Output Allocation
// One accumulator slot per thread in the grid.
cudaStatus = cudaMalloc((void**)& device_cluster_size_final, sizeof(unsigned long long)* THREAD_PER_BLOCK * BLOCK_NUM);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_cluster_size_final @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_cluster_size_final, 0, sizeof(unsigned long long)* THREAD_PER_BLOCK * BLOCK_NUM);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_cluster_size_final failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& device_prob_final, sizeof(double)* THREAD_PER_BLOCK * BLOCK_NUM);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_prob_final @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_prob_final, 0, sizeof(double)* THREAD_PER_BLOCK * BLOCK_NUM);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_prob_final failed!");
goto Error;
}
//MITM Allocation
// Intermediate meeting-point tables, only allocated when MITM mode is used.
if (is_MITM_used){
cudaStatus = cudaMalloc((void**)& MITM_prob_interm_global, sizeof(float)* GPU_Kenerl_t::MITM_size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc MITM_prob_interm_global @init failed!");
goto Error;
}
cudaStatus = cudaMemset(MITM_prob_interm_global, 0, sizeof(float)* GPU_Kenerl_t::MITM_size );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset MITM_prob_interm_global failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& MITM_size_interm_global, sizeof(unsigned long long)* GPU_Kenerl_t::MITM_size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc MITM_prob_interm_global @init failed!");
goto Error;
}
cudaStatus = cudaMemset(MITM_size_interm_global, 0, sizeof(unsigned long long)* GPU_Kenerl_t::MITM_size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset MITM_prob_interm_global failed!");
goto Error;
}
}
//Intermediate sync variable
cudaStatus = cudaMalloc((void**)& device_last_dx_ptr, sizeof(int) * ( 2) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_last_dx_ptr @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_last_dx_ptr, 0, sizeof(int) * (2) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_last_dx_ptr failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& device_branches_sum_before_dx, sizeof(unsigned long long) * ( 2 + 2 + 2 ) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_branches_sum_before_dx @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_branches_sum_before_dx, 0, sizeof(unsigned long long) * (2 + 2 + 2 ) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_branches_sum_before_dx failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& device_has_operation, (sizeof(bool) * 2 ) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc device_has_operation @init failed!");
goto Error;
}
cudaStatus = cudaMemset(device_has_operation, 0, (sizeof(bool) * 2) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset device_has_operation failed!");
goto Error;
}
//Finished
std::cout <<"\n----\n";
return;
Error:
std::cout << "\nCritical CUDA Error. 
"; if (cudaStatus != cudaSuccess) {
	// Report the sticky CUDA error with both its symbolic name and description, then hard-exit.
	cudaError_t err = cudaGetLastError();
	std::cout << "\nCRITICAL ERROR from init...";
	fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, cudaGetErrorName(err), cudaGetErrorString(err));
	std::cout << "\nExiting the program manually...";
	getchar(); // hold the console open so the message can be read before exiting
	exit(-1);
}
};

//Kernel - true_round used for bounding
// __launch_bounds__(THREAD_PER_BLOCK, 16) //FOR V100,P100
// Forward differential-cluster search kernel.
// Performs an iterative (stack-arrays instead of recursion) depth-first expansion of
// differential paths round by round: each grid-wide pass expands every branch of the
// current round's DX list, prunes by a Matsui-style probability bound, and either
// descends (cur_r+=1), backtracks (cur_r-=1), or repeats the round for leftover work.
// Uses grid.sync() from cooperative groups, so it must be launched as a cooperative
// kernel (cudaLaunchCooperativeKernel). Per-round input/output regions inside the big
// device buffers are addressed through the cur_*/output_* macros below, which are
// relative to cur_r. Accumulates matches into device_prob_final /
// device_cluster_size_final (one slot per thread; reduced on the host later).
__global__ void kernel_diff( unsigned char* device_dx, int* device_sbox_index, float* device_prob, int* device_branch_size, unsigned long long* device_cluster_size_final, double* device_prob_final, int* device_last_dx_ptr, bool* device_has_operation, unsigned long long* device_branches_sum_before_dx, unsigned long long* device_branch_size_thread, unsigned long long* device_branch_size_block, unsigned long long* device_total_branch_size_block){
	// printf("\nInteger: %i, %i, block_size (threads per blocks) : %i",threadIdx.x, blockIdx.x,blockDim.x);
	grid_group grid = this_grid();
	// Stage the 16-entry difference-distribution tables and the pruning-bound powers
	// into shared memory (first 32 threads of each block do the copy).
	if (threadIdx.x <32){
		if (threadIdx.x<16){
			SPN_DIFF::diff_table_size_shared[threadIdx.x] = SPN_DIFF::diff_table_size_global[threadIdx.x];
			for (int j = 0; j < 8; j++) {
				SPN_DIFF::diff_table_shared[threadIdx.x][j] = SPN_DIFF::diff_table_global[threadIdx.x][j];
				SPN_DIFF::prob_table_shared[threadIdx.x][j] = SPN_DIFF::prob_table_global[threadIdx.x][j];
			}
		}
		SPN_DIFF::prob_per_as_shared[threadIdx.x] = powf(CLUSTER_1AS_BEST_PROB, threadIdx.x+1);
		SPN_DIFF::prob_per_round_remaining_shared[threadIdx.x] = powf(CLUSTER_PROB_INDIV, threadIdx.x);
	}
	__syncthreads();
	//Computing target array index (id and output_range)
	//I - THREAD ID / total thread (including all block) - Used to coordinate splitting of tasks
	const int thread_id_default = (blockIdx.x * blockDim.x) + threadIdx.x;
	int cur_r = 0;
	int flip_0_1 = 0; // int flip_iter = 0;
	long long cur_iter = -1; //This has to be signed
	// Per-round saved traversal state (restored when backtracking to a shallower round).
	int dx_ptr_shared[MAX_ROUND_FORWARD]; // int dx_ptr[MAX_ROUND_FORWARD] = {0}; //From 0- DX_num for that rounds
	unsigned long long branch_sum_before_dx_ptr_shared[MAX_ROUND_FORWARD] = {0};
	unsigned long long branch_sum_before_block_thread_ptr_shared[MAX_ROUND_FORWARD] = {0};
	unsigned long long branch_sum_before_block_ptr_shared[MAX_ROUND_FORWARD] = {0};
	long long iter_shared[MAX_ROUND_FORWARD]; //Has to be signed
	//Preparation
	//Prepare array to store, each entry to next rounds will require the storing and restoring of these into local memory
	int thread_id_arr[MAX_ROUND_FORWARD]; //Each round GRID_THREAD_SIZE is added to this
	int cur_thread_id; //Default value does not matter
	int dx_ptr = 0;
	unsigned long long branch_sum_before_dx_ptr = 0;
	unsigned long long branch_sum_before_block_thread_ptr = 0; //From to 0-Block_num for that rounds
	// unsigned long long branch_sum_before_block_ptr = 0;
	//IO, need to be retarget after every rounds.
	// Round-relative addressing helpers: output_* target the (cur_r+1) slice of each
	// device buffer, cur_* target the cur_r slice. output_branch_size_all() points at the
	// two grid-total counters stored just before device_branch_size_block.
	//Output
	//NOTE: 32 here need to be changed
	#define output_dx(x) (( device_dx + ( 16 * MAX_PATH_PER_ROUND * (cur_r+1)) + (16 * thread_id_default * MAX_SPACE_PER_THREAD) + (x * 16) ))
	#define output_sbox_index(x) (( device_sbox_index + ( MAX_AS * MAX_PATH_PER_ROUND * (cur_r+1) ) + (MAX_AS* thread_id_default * MAX_SPACE_PER_THREAD) + (x * MAX_AS) ))
	#define output_prob(x) (( device_prob + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x))
	#define output_branch_size(x) (( device_branch_size + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x ))
	#define output_branch_size_thread() ((device_branch_size_thread + ( GRID_THREAD_SIZE * (cur_r+1) ) + thread_id_default))
	#define output_branch_size_block() ((device_branch_size_block + (BLOCK_NUM * (cur_r+1)) + blockIdx.x))
	#define output_branch_size_all() ((device_branch_size_block-2))
	// unsigned long long* output_branch_size_all = (device_branch_size_block-2);
	//Input
	#define cur_dx() ( ( device_dx + (16 * MAX_PATH_PER_ROUND * cur_r) ) )
	#define cur_sbox_index() ( ( device_sbox_index + (MAX_AS * MAX_PATH_PER_ROUND * cur_r) ) )
	#define cur_prob() ( ( device_prob + (MAX_PATH_PER_ROUND * cur_r) ) )
	#define cur_branch_size() ( ( device_branch_size + (MAX_PATH_PER_ROUND * cur_r) ) )
	#define cur_branch_size_thread() ( ( device_branch_size_thread + (GRID_THREAD_SIZE * cur_r) ) )
	#define cur_branch_size_block() ( (device_branch_size_block + (BLOCK_NUM * cur_r)) )
	//Mainloop -
	while(true){
		//Base case, cur_0 and reamining == 0
		bool has_operation = false;
		int increment = 0; //Determine output save position..
		unsigned long long thread_branch_num_so_far = 0; //Allow accumulaction of block_thread branch_num (atomic_add with each reset)
		// cur_iter == -1 signals a fresh round: split the grid-total branch count evenly
		// (ceil-div) across all threads and derive this thread's first global branch id.
		if(cur_iter == -1){
			cur_iter = output_branch_size_all()[flip_0_1]/GRID_THREAD_SIZE + (output_branch_size_all()[flip_0_1] % GRID_THREAD_SIZE != 0);
			cur_thread_id = thread_id_default * cur_iter;
		}
		//calculate block_thread_ptr.. initial
		int block_thread_ptr = dx_ptr/MAX_SPACE_PER_THREAD;
		int block_ptr = block_thread_ptr / THREAD_PER_BLOCK;
		//Find the correct DX with three layer
		int loop_limit = cur_iter<MAX_SPACE_PER_THREAD?cur_iter:MAX_SPACE_PER_THREAD;
		for (int i=0;i<loop_limit;i++){
			// Three-level prefix-sum search: locate which producer block, then which
			// producer thread, then which DX entry owns global branch id cur_thread_id.
			if (dx_ptr < MAX_PATH_PER_ROUND){
				branch_sum_before_block_thread_ptr = branch_sum_before_block_thread_ptr_shared[cur_r];
				//Shortcut
				if (cur_thread_id < (cur_branch_size()[dx_ptr] + branch_sum_before_dx_ptr) && cur_thread_id < (cur_branch_size_thread()[block_thread_ptr] + branch_sum_before_block_thread_ptr) ){
					goto finfinddx;
				}
				//Find the correct block
				unsigned long long cur_branch_size_reg0,cur_branch_size_reg1,cur_branch_size_reg2,cur_branch_size_reg3;
				unsigned long long cur_branch_size_reg[4];
				unsigned long long branches_temp = branch_sum_before_block_ptr_shared[cur_r];
				int initial_block_ptr = block_ptr;
				// Manually 4-way unrolled scan over per-block branch counts.
				while(true){
					cur_branch_size_reg0 = cur_branch_size_block()[block_ptr];
					cur_branch_size_reg1 = cur_branch_size_block()[block_ptr+1];
					cur_branch_size_reg2 = cur_branch_size_block()[block_ptr+2];
					cur_branch_size_reg3 = cur_branch_size_block()[block_ptr+3];
					if( block_ptr >= BLOCK_NUM){
						dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx
						goto finfinddx;
					}
					branches_temp += cur_branch_size_reg0;
					if (cur_branch_size_reg0 != 0 && cur_thread_id < branches_temp){
						branches_temp -= cur_branch_size_reg0;
						branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
						goto hasdx;
					}
					block_ptr+=1;
					if( block_ptr >= BLOCK_NUM){
						dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx
						goto finfinddx;
					}
					branches_temp += cur_branch_size_reg1;
					if (cur_branch_size_reg1 != 0 && cur_thread_id < branches_temp){
						branches_temp -= cur_branch_size_reg1;
						branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
						goto hasdx;
					}
					block_ptr+=1;
					if( block_ptr >= BLOCK_NUM){
						dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx
						goto finfinddx;
					}
					branches_temp += cur_branch_size_reg2;
					if (cur_branch_size_reg2 != 0 && cur_thread_id < branches_temp){
						branches_temp -= cur_branch_size_reg2;
						branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
						goto hasdx;
					}
					block_ptr+=1;
					if( block_ptr >= BLOCK_NUM){
						dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx
						goto finfinddx;
					}
					branches_temp += cur_branch_size_reg3;
					if (cur_branch_size_reg3 != 0 && cur_thread_id < branches_temp){
						branches_temp -= cur_branch_size_reg3;
						branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
						goto hasdx;
					}
					block_ptr+=1;
				}
				if (true){
					hasdx:
					int initial_block_thread_ptr = block_thread_ptr;
					if (initial_block_ptr == block_ptr){
						//Found out block does not move
						branches_temp = branch_sum_before_block_thread_ptr; //Take the old branch_size for block_thread (start off with offset block thread)
						//block_thread_ptr remain unchanged...
					}
					else{
						//New Block
						block_thread_ptr = block_ptr * THREAD_PER_BLOCK; //Point to the 1st element of the block
						//branches_temp remain unchanged because we are starting at 1st element...
					}
					//Find the correct block thread
					bool is_found = false;
					while(!is_found){
						cur_branch_size_reg[0] = cur_branch_size_thread()[block_thread_ptr];
						cur_branch_size_reg[1] = cur_branch_size_thread()[block_thread_ptr+1];
						cur_branch_size_reg[2] = cur_branch_size_thread()[block_thread_ptr+2];
						cur_branch_size_reg[3] = cur_branch_size_thread()[block_thread_ptr+3];
						#pragma unroll
						for (int i=0;i<4;i++){
							branches_temp += cur_branch_size_reg[i];
							if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){
								branches_temp -= cur_branch_size_reg[i];
								block_thread_ptr += i;
								is_found = true;
								break;
							}
						}
						if(!is_found){
							block_thread_ptr += 4;
						}
					}
					branch_sum_before_block_thread_ptr_shared[cur_r] = branches_temp;
					//Advance the dx position if needed (different block_thread location)
					if (block_thread_ptr == initial_block_thread_ptr){
						//Start at the same location
						branches_temp = branch_sum_before_dx_ptr;
					}
					else{
						dx_ptr = block_thread_ptr * MAX_SPACE_PER_THREAD;
					}
					//Find the correct dx position
					is_found = false;
					while(!is_found){
						cur_branch_size_reg[0] = cur_branch_size()[dx_ptr];
						cur_branch_size_reg[1] = cur_branch_size()[dx_ptr+1];
						cur_branch_size_reg[2] = cur_branch_size()[dx_ptr+2];
						cur_branch_size_reg[3] = cur_branch_size()[dx_ptr+3];
						#pragma unroll
						for (int i=0;i<4;i++){
							branches_temp += cur_branch_size_reg[i];
							if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){
								branches_temp -= cur_branch_size_reg[i];
								dx_ptr += i;
								is_found = true;
								break;
							}
						}
						if(!is_found){
							dx_ptr += 4;
						}
					}
					branch_sum_before_dx_ptr = branches_temp;
				}
				else{
					//Nothing here
				}
			}
			finfinddx: ;
			if (dx_ptr < MAX_PATH_PER_ROUND){ //If dx_ptr is within dx_num, [0-N)
				has_operation = true;
				float prob_thread = 1.0;
				int divide_factor = 1;
				unsigned int diff_freq_index; //0-16 only
				unsigned int remaining_value = cur_thread_id - branch_sum_before_dx_ptr ; //7^8 is less than 32 bit...
				unsigned char* cur_dx_temp = cur_dx() + ( 16 * dx_ptr ) ; //NOTE: Need to modify to fit datastruct of different cipher
				int* cur_sbox_index_ptr = cur_sbox_index() + (MAX_AS * dx_ptr);
				unsigned char cur_thread_partial_dy[17];
				cur_thread_partial_dy[16] = {0};
				memcpy(cur_thread_partial_dy,cur_dx_temp,16);
				int cur_sbox_index_temp[MAX_AS];
				memcpy(cur_sbox_index_temp, cur_sbox_index_ptr, sizeof(int) * MAX_AS);
				//Points to correct i_th branches of j_dx and so subs
				// Substitution: decode remaining_value as a mixed-radix number, one digit per
				// active s-box, selecting which output difference that s-box takes.
				#pragma unroll
				for (int i = 0; i < MAX_AS; i++) {
					unsigned char cur_val = cur_thread_partial_dy[cur_sbox_index_temp[i]];
					diff_freq_index = (remaining_value / divide_factor) % SPN_DIFF::diff_table_size_shared[cur_val];
					cur_thread_partial_dy[cur_sbox_index_temp[i]] = SPN_DIFF::diff_table_shared[cur_val][diff_freq_index]; //Assigning target val to partial_dy
					prob_thread *= (SPN_DIFF::prob_table_shared[cur_val][diff_freq_index]);
					divide_factor *= SPN_DIFF::diff_table_size_shared[cur_val];
				}
				prob_thread *= (*(cur_prob() + dx_ptr));
				if (cur_r+1 != MAX_ROUND_FORWARD){
					//Do Permutate
					unsigned long long front_64 = 0;
					#pragma unroll
					for (int i = 0; i < 16; i++) {
						if ( cur_thread_partial_dy[i] > 0) {
							front_64 |= SPN_DIFF::perm_lookup_global_forward[i][cur_thread_partial_dy[i]];
						}
					}
					#pragma unroll
					for (int i = 0; i < 16; i++) {
						cur_thread_partial_dy[i] = (front_64 >> ((15 - i) * 4)) & 0xf;
					}
					//Calculte sbox index and sbox number
					int save_sbox_num = 0;
					int save_branch_size = 1;
					int save_sbox_index[16]; //Will point to non existance 32 array entry (see substitution below)
					#pragma unroll
					for (int i=0;i< 16;i++){
						save_sbox_index[i] = 16;
					}
					#pragma unroll
					for (int i = 0; i < 16; i++) {
						if ((cur_thread_partial_dy[i] & 0xf) > 0) {
							save_branch_size *= SPN_DIFF::diff_table_size_shared[cur_thread_partial_dy[i]];
							save_sbox_index[save_sbox_num] = i;
							save_sbox_num++;
						}
					}
					//Pruning
					// if(true){
					if (save_sbox_num <= MAX_AS){ //If only next round AS <= 8
						//MATSUI BOUND
						float estimated_com_prob = SPN_DIFF::prob_per_round_remaining_shared[(MAX_ROUND_FORWARD - cur_r - 2)] * SPN_DIFF::prob_per_as_shared[save_sbox_num-1];
						//NOTE: this bound is less tight when round entered is not zero..
						if ((estimated_com_prob * prob_thread) >= SPN_DIFF::CLUSTER_PROB_BOUND_const) {
						// if (true) {
							memcpy(output_dx(increment),cur_thread_partial_dy,16);
							*output_prob(increment) = prob_thread;
							memcpy(output_sbox_index(increment), save_sbox_index, sizeof(int) * MAX_AS );
							*output_branch_size(increment) = save_branch_size;
							thread_branch_num_so_far += save_branch_size;
							increment += 1;
						}
						// else{ *output_branch_size = 0;}
					}
					// else{ *output_branch_size = 0;}
				}
				//LAST ROUNDS... no permutation and straight to savings.
				else{
					bool is_same = true;
					#pragma unroll
					for (int i=0;i<16;i++){
						if (SPN_DIFF::final_dy_constant[i] != cur_thread_partial_dy[i]){
							is_same= false;
							break;
						}
					}
					if (is_same){
						// Matched the target output difference: accumulate into this thread's
						// private slot (host reduces across slots afterwards).
						device_prob_final[thread_id_default] += prob_thread;
						device_cluster_size_final[thread_id_default] += 1;
					}
				}
			}
			cur_thread_id+=1;
		}
		cur_thread_id-=1;
		cur_iter = cur_iter - loop_limit;
		if(thread_id_default == 0){
			*(device_has_operation + (flip_0_1) ) = has_operation;
		}
		if (cur_r != MAX_ROUND_FORWARD-1){
			*output_branch_size_thread() += thread_branch_num_so_far; // so_far will be reset each sync thus adding like this is correct.
			atomicAdd(&SPN_DIFF::branch_size_block_shared[0], thread_branch_num_so_far);
			__syncthreads();
			if (threadIdx.x==0){
				// iter_shared[cur_r] = cur_iter;
				*output_branch_size_block() += SPN_DIFF::branch_size_block_shared[0];
				//Since the operation is once per output round, reset the stuff here
				atomicAdd( (output_branch_size_all()+!flip_0_1), SPN_DIFF::branch_size_block_shared[0]);
				SPN_DIFF::branch_size_block_shared[0] = 0;
			}
		}
		grid.sync(); //Wait for grid to synchronize before continue
		if (thread_id_default==0){
			output_branch_size_all()[flip_0_1] = 0;
		}
		grid.sync();
		// has_operation = *(device_has_operation + (flip_0_1) );
		// NOTE(review): the read-back of device_has_operation above is commented out here,
		// whereas kernel_diff_mitm performs it; as written, each thread uses only its own
		// local has_operation flag (and thread 0's write of device_has_operation is never
		// read). Verify this divergence between the two kernels is intentional.
		flip_0_1 = !flip_0_1;
		if(true){
			if (cur_r != MAX_ROUND_FORWARD-1 && has_operation){ //Is not last round and has operation
				//Goes forwards
				iter_shared[cur_r] = cur_iter;
				dx_ptr_shared[cur_r] = dx_ptr;
				branch_sum_before_dx_ptr_shared[cur_r] = branch_sum_before_dx_ptr;
				thread_id_arr[cur_r] = cur_thread_id;
				cur_r+=1;
				// cur_thread_id = thread_id_default; //NOTE: does not matter
				dx_ptr = 0;
				branch_sum_before_dx_ptr = 0;
				branch_sum_before_block_thread_ptr_shared[cur_r] = 0;
				branch_sum_before_block_ptr_shared[cur_r] = 0;
				cur_iter = -1; //Signal the requirement of intiialization
				if (cur_r!=MAX_ROUND_FORWARD-1){
					*output_branch_size_thread() = 0; //HAs to be reset because of tunneling
					if (threadIdx.x==0){
						*output_branch_size_block() = 0;
					}
				}
			}
			else if(!has_operation || (cur_r == MAX_ROUND_FORWARD-1 && cur_iter == 0) ){ //Has no operation => cur_iter == 0,
				//Goes backwards if last rounds or current rounds does not process anythings.
				do{
					cur_r-=1;
					if(cur_r == -1){
						return; //NOTE: Completed computation, Base Case
					}
					cur_iter = iter_shared[cur_r];
				}while(cur_iter==0);
				cur_thread_id = thread_id_arr[cur_r] + 1;
				dx_ptr = dx_ptr_shared[cur_r];
				branch_sum_before_dx_ptr = branch_sum_before_dx_ptr_shared[cur_r];
				*output_branch_size_thread() = 0;
				if (threadIdx.x==0){
					*output_branch_size_block() = 0;
				}
			}
			else{
				//Has operation and is last round and cur_iter != 0
				//Repeat last rounds.
				cur_thread_id += 1;
			}
		}
	}
};

// Forward differential search variant that, instead of comparing against a fixed final
// difference, accumulates per-pattern probability/count into the MITM (meet-in-the-middle)
// intermediate tables after the final-round permutation. Same cooperative-launch
// requirement and traversal structure as kernel_diff.
__launch_bounds__(THREAD_PER_BLOCK, 8) //FOR V100,P100
__global__ void kernel_diff_mitm( unsigned char* device_dx, int* device_sbox_index, float* device_prob, int* device_branch_size, unsigned long long* device_cluster_size_final, double* device_prob_final, int* device_last_dx_ptr, bool* device_has_operation, unsigned long long* device_branches_sum_before_dx, unsigned long long* device_branch_size_thread, unsigned long long* device_branch_size_block, unsigned long long* device_total_branch_size_block, float* MITM_prob_interm_global, unsigned long long* MITM_size_interm_global){
	// printf("\nInteger: %i, %i, block_size (threads per blocks) : %i",threadIdx.x, blockIdx.x,blockDim.x);
	grid_group grid = this_grid();
	// Stage difference-distribution tables and pruning-bound powers into shared memory.
	if (threadIdx.x <32){
		if (threadIdx.x<16){
			SPN_DIFF::diff_table_size_shared[threadIdx.x] = SPN_DIFF::diff_table_size_global[threadIdx.x];
			for (int j = 0; j < 8; j++) {
				SPN_DIFF::diff_table_shared[threadIdx.x][j] = SPN_DIFF::diff_table_global[threadIdx.x][j];
				SPN_DIFF::prob_table_shared[threadIdx.x][j] = SPN_DIFF::prob_table_global[threadIdx.x][j];
			}
		}
		SPN_DIFF::prob_per_as_shared[threadIdx.x] = powf(CLUSTER_1AS_BEST_PROB, threadIdx.x+1);
		SPN_DIFF::prob_per_round_remaining_shared[threadIdx.x] = powf(CLUSTER_PROB_INDIV, threadIdx.x);
	}
	__syncthreads(); //wait for init to be finished, sync up all threads within a block... shared memory lies within each block.
//Computing target array index (id and output_range)
//I - THREAD ID / total thread (including all block) - Used to coordinate splitting of tasks
const int thread_id_default = (blockIdx.x * blockDim.x) + threadIdx.x;
int cur_r = 0;
int flip_0_1 = 0;
long long cur_iter = -1; //This has to be signed
// Per-round saved traversal state, restored when backtracking to a shallower round.
int dx_ptr_shared[MAX_ROUND_FORWARD];
unsigned long long branch_sum_before_dx_ptr_shared[MAX_ROUND_FORWARD] = {0};
unsigned long long branch_sum_before_block_thread_ptr_shared[MAX_ROUND_FORWARD] = {0};
unsigned long long branch_sum_before_block_ptr_shared[MAX_ROUND_FORWARD] = {0};
long long iter_shared[MAX_ROUND_FORWARD]; //Has to be signed
//Preparation
//Prepare array to store, each entry to next rounds will require the storing and restoring of these into local memory
int thread_id_arr[MAX_ROUND_FORWARD]; //Each round GRID_THREAD_SIZE is added to this
int cur_thread_id; //Default value does not matter
// int dx_ptr[MAX_ROUND_FORWARD] = {0}; //From 0- DX_num for that rounds
int dx_ptr = 0;
unsigned long long branch_sum_before_dx_ptr = 0;
unsigned long long branch_sum_before_block_thread_ptr = 0; //From to 0-Block_num for that rounds
// unsigned long long branch_sum_before_block_ptr = 0;
//IO, need to be retarget after every rounds.
// Round-relative addressing helpers (identical to kernel_diff's): output_* target the
// (cur_r+1) slice of each buffer, cur_* the cur_r slice; output_branch_size_all() is the
// pair of grid-total counters stored just before device_branch_size_block.
//Output
#define output_dx(x) (( device_dx + ( 16 * MAX_PATH_PER_ROUND * (cur_r+1)) + (16 * thread_id_default * MAX_SPACE_PER_THREAD) + (x * 16) ))
#define output_sbox_index(x) (( device_sbox_index + ( MAX_AS * MAX_PATH_PER_ROUND * (cur_r+1) ) + (MAX_AS* thread_id_default * MAX_SPACE_PER_THREAD) + (x * MAX_AS) ))
#define output_prob(x) (( device_prob + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x))
#define output_branch_size(x) (( device_branch_size + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x ))
#define output_branch_size_thread() ((device_branch_size_thread + ( GRID_THREAD_SIZE * (cur_r+1) ) + thread_id_default))
#define output_branch_size_block() ((device_branch_size_block + (BLOCK_NUM * (cur_r+1)) + blockIdx.x))
#define output_branch_size_all() ((device_branch_size_block-2))
// unsigned long long* output_branch_size_all = (device_branch_size_block-2);
//Input
#define cur_dx() ( ( device_dx + (16 * MAX_PATH_PER_ROUND * cur_r) ) )
#define cur_sbox_index() ( ( device_sbox_index + (MAX_AS * MAX_PATH_PER_ROUND * cur_r) ) )
#define cur_prob() ( ( device_prob + (MAX_PATH_PER_ROUND * cur_r) ) )
#define cur_branch_size() ( ( device_branch_size + (MAX_PATH_PER_ROUND * cur_r) ) )
#define cur_branch_size_thread() ( ( device_branch_size_thread + (GRID_THREAD_SIZE * cur_r) ) )
#define cur_branch_size_block() ( (device_branch_size_block + (BLOCK_NUM * cur_r)) )
//Mainloop -
while(true){
	//Base case, cur_0 and reamining == 0
	bool has_operation = false;
	int increment = 0; //Determine output save position..
	unsigned long long thread_branch_num_so_far = 0; //Allow accumulaction of block_thread branch_num (atomic_add with each reset)
	// cur_iter == -1 signals a fresh round: split the grid-total branch count evenly
	// (ceil-div) across all threads and derive this thread's first global branch id.
	if(cur_iter == -1){
		// cur_iter = ceil(1.0 * output_branch_size_all()[flip_0_1]/GRID_THREAD_SIZE);
		cur_iter = output_branch_size_all()[flip_0_1]/GRID_THREAD_SIZE + (output_branch_size_all()[flip_0_1] % GRID_THREAD_SIZE != 0);
		cur_thread_id = thread_id_default * cur_iter;
		// flip_iter = !flip_iter;
	}
	//calculate block_thread_ptr.. initial
	int block_thread_ptr = dx_ptr/MAX_SPACE_PER_THREAD;
	int block_ptr = block_thread_ptr / THREAD_PER_BLOCK;
	//Find the correct DX with three layer
	int loop_limit = cur_iter<MAX_SPACE_PER_THREAD?cur_iter:MAX_SPACE_PER_THREAD;
	for (int i=0;i<loop_limit;i++){
		// Three-level prefix-sum search: locate which producer block, then which producer
		// thread, then which DX entry owns global branch id cur_thread_id.
		if (dx_ptr < MAX_PATH_PER_ROUND){
			branch_sum_before_block_thread_ptr = branch_sum_before_block_thread_ptr_shared[cur_r];
			//Shortcut
			if (cur_thread_id < (cur_branch_size()[dx_ptr] + branch_sum_before_dx_ptr) && cur_thread_id < (cur_branch_size_thread()[block_thread_ptr] + branch_sum_before_block_thread_ptr) ){
				goto finfinddx;
			}
			//Find the correct block
			unsigned long long cur_branch_size_reg0,cur_branch_size_reg1,cur_branch_size_reg2,cur_branch_size_reg3;
			unsigned long long cur_branch_size_reg[4];
			unsigned long long branches_temp = branch_sum_before_block_ptr_shared[cur_r];
			int initial_block_ptr = block_ptr;
			// Manually 4-way unrolled scan over per-block branch counts.
			while(true){
				cur_branch_size_reg0 = cur_branch_size_block()[block_ptr];
				cur_branch_size_reg1 = cur_branch_size_block()[block_ptr+1];
				cur_branch_size_reg2 = cur_branch_size_block()[block_ptr+2];
				cur_branch_size_reg3 = cur_branch_size_block()[block_ptr+3];
				if( block_ptr >= BLOCK_NUM){
					dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx
					goto finfinddx;
				}
				branches_temp += cur_branch_size_reg0;
				if (cur_branch_size_reg0 != 0 && cur_thread_id < branches_temp){
					branches_temp -= cur_branch_size_reg0;
					branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
					goto hasdx;
				}
				block_ptr+=1;
				if( block_ptr >= BLOCK_NUM){
					dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx
					goto finfinddx;
				}
				branches_temp += cur_branch_size_reg1;
				if (cur_branch_size_reg1 != 0 && cur_thread_id < branches_temp){
					branches_temp -= cur_branch_size_reg1;
					branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
					goto hasdx;
				}
				block_ptr+=1;
				if( block_ptr >= BLOCK_NUM){
					dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx
					goto finfinddx;
				}
				branches_temp += cur_branch_size_reg2;
				if (cur_branch_size_reg2 != 0 && cur_thread_id < branches_temp){
					branches_temp -= cur_branch_size_reg2;
					branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
					goto hasdx;
				}
				block_ptr+=1;
				if( block_ptr >= BLOCK_NUM){
					dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx
					goto finfinddx;
				}
				branches_temp += cur_branch_size_reg3;
				if (cur_branch_size_reg3 != 0 && cur_thread_id < branches_temp){
					branches_temp -= cur_branch_size_reg3;
					branch_sum_before_block_ptr_shared[cur_r] = branches_temp;
					goto hasdx;
				}
				block_ptr+=1;
			}
			if (true){
				hasdx:
				int initial_block_thread_ptr = block_thread_ptr;
				if (initial_block_ptr == block_ptr){
					//Found out block does not move
					branches_temp = branch_sum_before_block_thread_ptr; //Take the old branch_size for block_thread (start off with offset block thread)
					//block_thread_ptr remain unchanged...
				}
				else{
					//New Block
					block_thread_ptr = block_ptr * THREAD_PER_BLOCK; //Point to the 1st element of the block
					//branches_temp remain unchanged because we are starting at 1st element...
				}
				//Find the correct block thread
				bool is_found = false;
				while(!is_found){
					cur_branch_size_reg[0] = cur_branch_size_thread()[block_thread_ptr];
					cur_branch_size_reg[1] = cur_branch_size_thread()[block_thread_ptr+1];
					cur_branch_size_reg[2] = cur_branch_size_thread()[block_thread_ptr+2];
					cur_branch_size_reg[3] = cur_branch_size_thread()[block_thread_ptr+3];
					#pragma unroll
					for (int i=0;i<4;i++){
						branches_temp += cur_branch_size_reg[i];
						if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){
							branches_temp -= cur_branch_size_reg[i];
							block_thread_ptr += i;
							is_found = true;
							break;
						}
					}
					if(!is_found){
						block_thread_ptr += 4;
					}
				}
				branch_sum_before_block_thread_ptr_shared[cur_r] = branches_temp;
				//Advance the dx position if needed (different block_thread location)
				if (block_thread_ptr == initial_block_thread_ptr){
					//Start at the same location
					branches_temp = branch_sum_before_dx_ptr;
				}
				else{
					dx_ptr = block_thread_ptr * MAX_SPACE_PER_THREAD;
				}
				//Find the correct dx position
				is_found = false;
				while(!is_found){
					cur_branch_size_reg[0] = cur_branch_size()[dx_ptr];
					cur_branch_size_reg[1] = cur_branch_size()[dx_ptr+1];
					cur_branch_size_reg[2] = cur_branch_size()[dx_ptr+2];
					cur_branch_size_reg[3] = cur_branch_size()[dx_ptr+3];
					#pragma unroll
					for (int i=0;i<4;i++){
						//NOTE: no need to check out of bounds if correctly impleneted, it will be filtered out at block level..
						branches_temp += cur_branch_size_reg[i];
						if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){
							branches_temp -= cur_branch_size_reg[i];
							dx_ptr += i;
							is_found = true;
							break;
						}
					}
					if(!is_found){
						dx_ptr += 4;
					}
				}
				branch_sum_before_dx_ptr = branches_temp;
			}
			else{
				//Nothing here
			}
		}
		finfinddx: ;
		if (dx_ptr < MAX_PATH_PER_ROUND){ //If dx_ptr is within dx_num, [0-N)
			has_operation = true;
			float prob_thread = 1.0;
			int divide_factor = 1;
			unsigned int diff_freq_index; //0-16 only
			unsigned int remaining_value = cur_thread_id - branch_sum_before_dx_ptr ; //7^8 is less than 32 bit...
			unsigned char* cur_dx_temp = cur_dx() + ( 16 * dx_ptr ) ; //NOTE: Need to modify to fit datastruct of different cipher
			int* cur_sbox_index_ptr = cur_sbox_index() + (MAX_AS * dx_ptr);
			unsigned char cur_thread_partial_dy[17];
			cur_thread_partial_dy[16] = {0};
			memcpy(cur_thread_partial_dy,cur_dx_temp,16);
			int cur_sbox_index_temp[MAX_AS];
			memcpy(cur_sbox_index_temp, cur_sbox_index_ptr, sizeof(int) * MAX_AS);
			//Points to correct i_th branches of j_dx and so subs
			// Substitution: decode remaining_value as a mixed-radix number, one digit per
			// active s-box, selecting which output difference that s-box takes.
			#pragma unroll
			for (int i = 0; i < MAX_AS; i++) {
				unsigned char cur_val = cur_thread_partial_dy[cur_sbox_index_temp[i]];
				diff_freq_index = (remaining_value / divide_factor) % SPN_DIFF::diff_table_size_shared[cur_val];
				cur_thread_partial_dy[cur_sbox_index_temp[i]] = SPN_DIFF::diff_table_shared[cur_val][diff_freq_index]; //Assigning target val to partial_dy
				prob_thread *= (SPN_DIFF::prob_table_shared[cur_val][diff_freq_index]);
				divide_factor *= SPN_DIFF::diff_table_size_shared[cur_val];
			}
			prob_thread *= (*(cur_prob() + dx_ptr));
			//Do Permutate
			// Unlike kernel_diff, permutation is applied on every round here (including
			// the last) before the MITM pattern is recorded.
			unsigned long long front_64 = 0;
			#pragma unroll
			for (int i = 0; i < 16; i++) {
				if ( cur_thread_partial_dy[i] > 0) {
					//Permutation LUTable
					//TODO: require modify to feed in correct forward/backward
					front_64 |= SPN_DIFF::perm_lookup_global_forward[i][cur_thread_partial_dy[i]];
				}
			}
			#pragma unroll
			for (int i = 0; i < 16; i++) {
				cur_thread_partial_dy[i] = (front_64 >> ((15 - i) * 4)) & 0xf;
			}
			if (cur_r != MAX_ROUND_FORWARD-1){
				//Calculte sbox index and sbox number
				int save_sbox_num = 0;
				int save_branch_size = 1;
				int save_sbox_index[16]; //Will point to non existance 32 array entry (see substitution below)
				#pragma unroll
				for (int i=0;i< 16;i++){
					save_sbox_index[i] = 16;
				}
				#pragma unroll
				for (int i = 0; i < 16; i++) {
					if ((cur_thread_partial_dy[i] & 0xf) > 0) {
						save_branch_size *= SPN_DIFF::diff_table_size_shared[cur_thread_partial_dy[i]];
						save_sbox_index[save_sbox_num] = i;
						save_sbox_num++;
					}
				}
				//Pruning
				// if(true){
				if (save_sbox_num <= MAX_AS){ //If only next round AS <= 8
					//MATSUI BOUND
					float estimated_com_prob = SPN_DIFF::prob_per_round_remaining_shared[(MAX_ROUND_FORWARD - cur_r - 2)] * SPN_DIFF::prob_per_as_shared[save_sbox_num-1];
					//NOTE: this bound is less tight when round entered is not zero..
					if ((estimated_com_prob * prob_thread) >= SPN_DIFF::CLUSTER_PROB_BOUND_const) {
					// if (true) {
						memcpy(output_dx(increment),cur_thread_partial_dy,16);
						*output_prob(increment) = prob_thread;
						memcpy(output_sbox_index(increment), save_sbox_index, sizeof(int) * MAX_AS );
						*output_branch_size(increment) = save_branch_size;
						thread_branch_num_so_far += save_branch_size;
						increment += 1;
					}
					// else{ *output_branch_size = 0;}
				}
				// else{ *output_branch_size = 0;}
			}
			//LAST ROUNDS... no permutation and straight to savings.
			else{
				int sbox_num=0;
				int sbox_index[16]={0};
				#pragma unroll
				for (int i=0;i<16;i++){
					if (cur_thread_partial_dy[i] !=0){
						sbox_index[sbox_num] = i;
						sbox_num+=1;
					}
				}
				if (sbox_num <=3){ //Possible to store three only...
					//Computing appropriate index
					// Pack up to three (position, nibble-value) pairs into a 9-bit-per-sbox
					// index, then accumulate probability and count into the MITM tables.
					int index=0;
					#pragma unroll
					for (int i=0;i<sbox_num;i++){
						index|= ( ( (sbox_index[i]&0b11111) | ( (cur_thread_partial_dy[sbox_index[i]]&0b1111) << 5) ) << (i * 9) );
					}
					atomicAdd(MITM_prob_interm_global+index,prob_thread);
					atomicAdd(MITM_size_interm_global+index,1);
				}
			}
		}
		cur_thread_id+=1;
	}
	cur_thread_id-=1;
	// cur_iter = cur_iter > 0?cur_iter-loop_limit:cur_iter; //Make sure >0 => -1, if 0 left it
	cur_iter = cur_iter - loop_limit;
	if(thread_id_default == 0){
		*(device_has_operation + (flip_0_1) ) = has_operation;
	}
	if (cur_r != MAX_ROUND_FORWARD-1){
		*output_branch_size_thread() += thread_branch_num_so_far; // so_far will be reset each sync thus adding like this is correct.
		atomicAdd(&SPN_DIFF::branch_size_block_shared[0], thread_branch_num_so_far);
		__syncthreads();
		if (threadIdx.x==0){
			// iter_shared[cur_r] = cur_iter;
			*output_branch_size_block() += SPN_DIFF::branch_size_block_shared[0];
			//Since the operation is once per output round, reset the stuff here
			atomicAdd( (output_branch_size_all()+!flip_0_1), SPN_DIFF::branch_size_block_shared[0]);
			SPN_DIFF::branch_size_block_shared[0] = 0;
		}
	}
	grid.sync(); //Wait for grid to synchronize before continue
	if (thread_id_default==0){
		output_branch_size_all()[flip_0_1] = 0;
	}
	grid.sync();
	// Unlike kernel_diff, the grid-wide flag written by thread 0 is read back here, so
	// all threads take the same forwards/backwards decision below.
	has_operation = *(device_has_operation + (flip_0_1) );
	flip_0_1 = !flip_0_1;
	if(true){
		if (cur_r != MAX_ROUND_FORWARD-1 && has_operation){ //Is not last round and has operation
			//Goes forwards
			iter_shared[cur_r] = cur_iter;
			dx_ptr_shared[cur_r] = dx_ptr;
			branch_sum_before_dx_ptr_shared[cur_r] = branch_sum_before_dx_ptr;
			thread_id_arr[cur_r] = cur_thread_id;
			cur_r+=1;
			dx_ptr = 0;
			branch_sum_before_dx_ptr = 0;
			branch_sum_before_block_thread_ptr_shared[cur_r] = 0;
			branch_sum_before_block_ptr_shared[cur_r] = 0;
			cur_iter = -1; //Signal the requirement of intiialization
			if (cur_r!=MAX_ROUND_FORWARD-1){
				*output_branch_size_thread() = 0; //HAs to be reset because of tunneling
				if (threadIdx.x==0){
					*output_branch_size_block() = 0;
				}
			}
		}
		else if(!has_operation || (cur_r == MAX_ROUND_FORWARD-1 && cur_iter == 0) ){ //Has no operation => cur_iter == 0,
			//Goes backwards if last rounds or current rounds does not process anythings.
			do{
				cur_r-=1;
				if(cur_r == -1){
					return; //NOTE: Completed computation, Base Case
				}
				cur_iter = iter_shared[cur_r];
			}while(cur_iter==0);
			cur_thread_id = thread_id_arr[cur_r] + 1;
			dx_ptr = dx_ptr_shared[cur_r];
			branch_sum_before_dx_ptr = branch_sum_before_dx_ptr_shared[cur_r];
			*output_branch_size_thread() = 0;
			if (threadIdx.x==0){
				*output_branch_size_block() = 0;
			}
		}
		else{
			//Has operation and is last round and cur_iter != 0
			//Repeat last rounds.
			cur_thread_id += 1;
		}
	}
}
};

// Backward (decryption-direction) MITM search kernel: identical traversal structure but
// loads the *_reversed difference-distribution tables. Requires cooperative launch.
__launch_bounds__(THREAD_PER_BLOCK, 8)
__global__ void kernel_diff_mitm_backward( unsigned char* device_dx, int* device_sbox_index, float* device_prob, int* device_branch_size, unsigned long long* device_cluster_size_final, double* device_prob_final, int* device_last_dx_ptr, bool* device_has_operation, unsigned long long* device_branches_sum_before_dx, unsigned long long* device_branch_size_thread, unsigned long long* device_branch_size_block, unsigned long long* device_total_branch_size_block, float* MITM_prob_interm_global, unsigned long long* MITM_size_interm_global){
	// printf("\nInteger: %i, %i, block_size (threads per blocks) : %i",threadIdx.x, blockIdx.x,blockDim.x);
	grid_group grid = this_grid();
	// Stage the REVERSED difference-distribution tables into shared memory.
	if (threadIdx.x <32){
		if (threadIdx.x<16){
			SPN_DIFF::diff_table_size_shared[threadIdx.x] = SPN_DIFF::diff_table_size_global_reversed[threadIdx.x];
			for (int j = 0; j < 8; j++) {
				SPN_DIFF::diff_table_shared[threadIdx.x][j] = SPN_DIFF::diff_table_global_reversed[threadIdx.x][j];
				SPN_DIFF::prob_table_shared[threadIdx.x][j] = SPN_DIFF::prob_table_global_reversed[threadIdx.x][j];
			}
		}
		SPN_DIFF::prob_per_as_shared[threadIdx.x] = powf(CLUSTER_1AS_BEST_PROB, threadIdx.x+1);
		SPN_DIFF::prob_per_round_remaining_shared[threadIdx.x] = powf(CLUSTER_PROB_INDIV, threadIdx.x);
	}
	__syncthreads(); //wait for init to be finished, sync up all threads within a block... shared memory lies within each block.
//Computing target array index (id and output_range) //I - THREAD ID / total thread (including all block) - Used to coordinate splitting of tasks const int thread_id_default = (blockIdx.x * blockDim.x) + threadIdx.x; int cur_r = 0; int flip_0_1 = 0; long long cur_iter = -1; //This has to be signed int dx_ptr_shared[MAX_ROUND_BACKWARD]; unsigned long long branch_sum_before_dx_ptr_shared[MAX_ROUND_BACKWARD] = {0}; unsigned long long branch_sum_before_block_thread_ptr_shared[MAX_ROUND_BACKWARD] = {0}; unsigned long long branch_sum_before_block_ptr_shared[MAX_ROUND_BACKWARD] = {0}; long long iter_shared[MAX_ROUND_BACKWARD]; //Has to be signed //Preparation //Prepare array to store, each entry to next rounds will require the storing and restoring of these into local memory int thread_id_arr[MAX_ROUND_BACKWARD]; //Each round GRID_THREAD_SIZE is added to this int cur_thread_id; //Default value does not matter // int dx_ptr[MAX_ROUND_FORWARD] = {0}; //From 0- DX_num for that rounds int dx_ptr = 0; unsigned long long branch_sum_before_dx_ptr = 0; unsigned long long branch_sum_before_block_thread_ptr = 0; //From to 0-Block_num for that rounds // unsigned long long branch_sum_before_block_ptr = 0; //IO, need to be retarget after every rounds. 
//Output #define output_dx(x) (( device_dx + ( 16 * MAX_PATH_PER_ROUND * (cur_r+1)) + (16 * thread_id_default * MAX_SPACE_PER_THREAD) + (x * 16) )) #define output_sbox_index(x) (( device_sbox_index + ( MAX_AS * MAX_PATH_PER_ROUND * (cur_r+1) ) + (MAX_AS* thread_id_default * MAX_SPACE_PER_THREAD) + (x * MAX_AS) )) #define output_prob(x) (( device_prob + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x)) #define output_branch_size(x) (( device_branch_size + ( MAX_PATH_PER_ROUND * (cur_r+1) ) + (thread_id_default * MAX_SPACE_PER_THREAD) + x )) #define output_branch_size_thread() ((device_branch_size_thread + ( GRID_THREAD_SIZE * (cur_r+1) ) + thread_id_default)) #define output_branch_size_block() ((device_branch_size_block + (BLOCK_NUM * (cur_r+1)) + blockIdx.x)) #define output_branch_size_all() ((device_branch_size_block-2)) // unsigned long long* output_branch_size_all = (device_branch_size_block-2); //Input #define cur_dx() ( ( device_dx + (16 * MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_sbox_index() ( ( device_sbox_index + (MAX_AS * MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_prob() ( ( device_prob + (MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_branch_size() ( ( device_branch_size + (MAX_PATH_PER_ROUND * cur_r) ) ) #define cur_branch_size_thread() ( ( device_branch_size_thread + (GRID_THREAD_SIZE * cur_r) ) ) #define cur_branch_size_block() ( (device_branch_size_block + (BLOCK_NUM * cur_r)) ) //Mainloop - while(true){ //Base case, cur_0 and reamining == 0 bool has_operation = false; int increment = 0; //Determine output save position.. 
unsigned long long thread_branch_num_so_far = 0; //Allow accumulaction of block_thread branch_num (atomic_add with each reset) if(cur_iter == -1){ // cur_iter = ceil(1.0 * output_branch_size_all()[flip_0_1]/GRID_THREAD_SIZE); cur_iter = output_branch_size_all()[flip_0_1]/GRID_THREAD_SIZE + (output_branch_size_all()[flip_0_1] % GRID_THREAD_SIZE != 0); cur_thread_id = thread_id_default * cur_iter; } //calculate block_thread_ptr.. initial int block_thread_ptr = dx_ptr/MAX_SPACE_PER_THREAD; int block_ptr = block_thread_ptr / THREAD_PER_BLOCK; //Find the correct DX with three layer int loop_limit = cur_iter<MAX_SPACE_PER_THREAD?cur_iter:MAX_SPACE_PER_THREAD; for (int i=0;i<loop_limit;i++){ if (dx_ptr < MAX_PATH_PER_ROUND){ branch_sum_before_block_thread_ptr = branch_sum_before_block_thread_ptr_shared[cur_r]; //Shortcut if (cur_thread_id < (cur_branch_size()[dx_ptr] + branch_sum_before_dx_ptr) && cur_thread_id < (cur_branch_size_thread()[block_thread_ptr] + branch_sum_before_block_thread_ptr) ){ goto finfinddx; } //Find the correct block unsigned long long cur_branch_size_reg0,cur_branch_size_reg1,cur_branch_size_reg2,cur_branch_size_reg3; unsigned long long cur_branch_size_reg[4]; unsigned long long branches_temp = branch_sum_before_block_ptr_shared[cur_r]; int initial_block_ptr = block_ptr; while(true){ cur_branch_size_reg0 = cur_branch_size_block()[block_ptr]; cur_branch_size_reg1 = cur_branch_size_block()[block_ptr+1]; cur_branch_size_reg2 = cur_branch_size_block()[block_ptr+2]; cur_branch_size_reg3 = cur_branch_size_block()[block_ptr+3]; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx goto finfinddx; } branches_temp += cur_branch_size_reg0; if (cur_branch_size_reg0 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg0; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx 
goto finfinddx; } branches_temp += cur_branch_size_reg1; if (cur_branch_size_reg1 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg1; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx goto finfinddx; } branches_temp += cur_branch_size_reg2; if (cur_branch_size_reg2 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg2; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; if( block_ptr >= BLOCK_NUM){ dx_ptr = MAX_PATH_PER_ROUND; //Indicate nothing to do, end of dx goto finfinddx; } branches_temp += cur_branch_size_reg3; if (cur_branch_size_reg3 != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg3; branch_sum_before_block_ptr_shared[cur_r] = branches_temp; goto hasdx; } block_ptr+=1; } if (true){ hasdx: int initial_block_thread_ptr = block_thread_ptr; if (initial_block_ptr == block_ptr){ //Found out block does not move branches_temp = branch_sum_before_block_thread_ptr; //Take the old branch_size for block_thread (start off with offset block thread) //block_thread_ptr remain unchanged... } else{ //New Block block_thread_ptr = block_ptr * THREAD_PER_BLOCK; //Point to the 1st element of the block //branches_temp remain unchanged because we are starting at 1st element... 
} //Find the correct block thread bool is_found = false; while(!is_found){ cur_branch_size_reg[0] = cur_branch_size_thread()[block_thread_ptr]; cur_branch_size_reg[1] = cur_branch_size_thread()[block_thread_ptr+1]; cur_branch_size_reg[2] = cur_branch_size_thread()[block_thread_ptr+2]; cur_branch_size_reg[3] = cur_branch_size_thread()[block_thread_ptr+3]; #pragma unroll for (int i=0;i<4;i++){ branches_temp += cur_branch_size_reg[i]; if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg[i]; block_thread_ptr += i; is_found = true; break; } } if(!is_found){ block_thread_ptr += 4; } } branch_sum_before_block_thread_ptr_shared[cur_r] = branches_temp; //Advance the dx position if needed (different block_thread location) if (block_thread_ptr == initial_block_thread_ptr){ //Start at the same location branches_temp = branch_sum_before_dx_ptr; } else{ dx_ptr = block_thread_ptr * MAX_SPACE_PER_THREAD; } //Find the correct dx position is_found = false; while(!is_found){ cur_branch_size_reg[0] = cur_branch_size()[dx_ptr]; cur_branch_size_reg[1] = cur_branch_size()[dx_ptr+1]; cur_branch_size_reg[2] = cur_branch_size()[dx_ptr+2]; cur_branch_size_reg[3] = cur_branch_size()[dx_ptr+3]; #pragma unroll for (int i=0;i<4;i++){ //NOTE: no need to check out of bounds if correctly impleneted, it will be filtered out at block level.. branches_temp += cur_branch_size_reg[i]; if (cur_branch_size_reg[i] != 0 && cur_thread_id < branches_temp){ branches_temp -= cur_branch_size_reg[i]; dx_ptr += i; is_found = true; break; } } if(!is_found){ dx_ptr += 4; } } branch_sum_before_dx_ptr = branches_temp; } else{ //Nothing here } } finfinddx: ; if (dx_ptr < MAX_PATH_PER_ROUND){ //If dx_ptr is within dx_num, [0-N) has_operation = true; float prob_thread = 1.0; int divide_factor = 1; unsigned int diff_freq_index; //0-16 only unsigned int remaining_value = cur_thread_id - branch_sum_before_dx_ptr ; //7^8 is less than 32 bit... 
unsigned char* cur_dx_temp = cur_dx() + ( 16 * dx_ptr ) ; //NOTE: Need to modify to fit datastruct of different cipher int* cur_sbox_index_ptr = cur_sbox_index() + (MAX_AS * dx_ptr); unsigned char cur_thread_partial_dy[17]; cur_thread_partial_dy[16] = {0}; memcpy(cur_thread_partial_dy,cur_dx_temp,16); int cur_sbox_index_temp[MAX_AS]; memcpy(cur_sbox_index_temp, cur_sbox_index_ptr, sizeof(int) * MAX_AS); //Points to correct i_th branches of j_dx and so subs #pragma unroll for (int i = 0; i < MAX_AS; i++) { unsigned char cur_val = cur_thread_partial_dy[cur_sbox_index_temp[i]]; diff_freq_index = (remaining_value / divide_factor) % SPN_DIFF::diff_table_size_shared[cur_val]; cur_thread_partial_dy[cur_sbox_index_temp[i]] = SPN_DIFF::diff_table_shared[cur_val][diff_freq_index]; //Assigning target val to partial_dy prob_thread *= (SPN_DIFF::prob_table_shared[cur_val][diff_freq_index]); divide_factor *= SPN_DIFF::diff_table_size_shared[cur_val]; } prob_thread *= (*(cur_prob() + dx_ptr)); if (cur_r+1 != MAX_ROUND_BACKWARD){ //Do Permutate unsigned long long front_64 = 0; #pragma unroll for (int i = 0; i < 16; i++) { if ( cur_thread_partial_dy[i] > 0) { //Permutation LUTable front_64 |= SPN_DIFF::perm_lookup_global_reversed[i][cur_thread_partial_dy[i]]; } } #pragma unroll for (int i = 0; i < 16; i++) { cur_thread_partial_dy[i] = (front_64 >> ((15 - i) * 4)) & 0xf; } //Calculte sbox index and sbox number int save_sbox_num = 0; int save_branch_size = 1; int save_sbox_index[16]; //Will point to non existance 32 array entry (see substitution below) #pragma unroll for (int i=0;i< 16;i++){ save_sbox_index[i] = 16; } #pragma unroll for (int i = 0; i < 16; i++) { if ((cur_thread_partial_dy[i] & 0xf) > 0) { save_branch_size *= SPN_DIFF::diff_table_size_shared[cur_thread_partial_dy[i]]; save_sbox_index[save_sbox_num] = i; save_sbox_num++; } } //Pruning // if(true){ if (save_sbox_num <= MAX_AS){ //If only next round AS <= 8 //MATSUI BOUND float estimated_com_prob = 
SPN_DIFF::prob_per_round_remaining_shared[(MAX_ROUND_BACKWARD - cur_r - 2)] * SPN_DIFF::prob_per_as_shared[save_sbox_num-1]; //NOTE: this bound is less tight when round entered is not zero.. if ((estimated_com_prob * prob_thread) >= SPN_DIFF::CLUSTER_PROB_BOUND_const) { memcpy(output_dx(increment),cur_thread_partial_dy,16); *output_prob(increment) = prob_thread; memcpy(output_sbox_index(increment), save_sbox_index, sizeof(int) * MAX_AS ); *output_branch_size(increment) = save_branch_size; thread_branch_num_so_far += save_branch_size; increment += 1; } // else{ *output_branch_size = 0;} } // else{ *output_branch_size = 0;} } //LAST ROUNDS... no permutation and straight to savings. else{ int sbox_num=0; int sbox_index[16]={0}; #pragma unroll for (int i=0;i<16;i++){ if (cur_thread_partial_dy[i] !=0){ sbox_index[sbox_num] = i; sbox_num+=1; } } if (sbox_num <=3){ //Possible to store three only... //Computing appropriate index int index=0; #pragma unroll for (int i=0;i<sbox_num;i++){ index|= ( ( (sbox_index[i]&0b11111) | ( (cur_thread_partial_dy[sbox_index[i]]&0b1111) << 5) ) << (i * 9) ); } unsigned long long target_size = MITM_size_interm_global[index]; if(target_size > 0){ //Exist connection double target_prob = ( (1.0 * prob_thread) * MITM_prob_interm_global[index]); //DEBUG: enable back //Add to collection device_prob_final[thread_id_default] += target_prob; device_cluster_size_final[thread_id_default] += target_size; } } } } cur_thread_id+=1; } cur_thread_id-=1; // cur_iter = cur_iter > 0?cur_iter-loop_limit:cur_iter; //Make sure >0 => -1, if 0 left it cur_iter = cur_iter - loop_limit; if(thread_id_default == 0){ *(device_has_operation + (flip_0_1) ) = has_operation; } if (cur_r != MAX_ROUND_BACKWARD-1){ *output_branch_size_thread() += thread_branch_num_so_far; // so_far will be reset each sync thus adding like this is correct. 
atomicAdd(&SPN_DIFF::branch_size_block_shared[0], thread_branch_num_so_far); __syncthreads(); if (threadIdx.x==0){ // iter_shared[cur_r] = cur_iter; *output_branch_size_block() += SPN_DIFF::branch_size_block_shared[0]; //Since the operation is once per output round, reset the stuff here atomicAdd( (output_branch_size_all()+!flip_0_1), SPN_DIFF::branch_size_block_shared[0]); SPN_DIFF::branch_size_block_shared[0] = 0; } } grid.sync(); //Wait for grid to synchronize before continue if (thread_id_default==0){ output_branch_size_all()[flip_0_1] = 0; } grid.sync(); has_operation = *(device_has_operation + (flip_0_1) ); flip_0_1 = !flip_0_1; if(true){ if (cur_r != MAX_ROUND_BACKWARD-1 && has_operation){ //Is not last round and has operation //Goes forwards iter_shared[cur_r] = cur_iter; dx_ptr_shared[cur_r] = dx_ptr; branch_sum_before_dx_ptr_shared[cur_r] = branch_sum_before_dx_ptr; thread_id_arr[cur_r] = cur_thread_id; cur_r+=1; // cur_thread_id = thread_id_default; //NOTE: does not matter dx_ptr = 0; branch_sum_before_dx_ptr = 0; branch_sum_before_block_thread_ptr_shared[cur_r] = 0; branch_sum_before_block_ptr_shared[cur_r] = 0; cur_iter = -1; //Signal the requirement of intiialization if (cur_r!=MAX_ROUND_BACKWARD-1){ *output_branch_size_thread() = 0; //HAs to be reset because of tunneling if (threadIdx.x==0){ *output_branch_size_block() = 0; } } } else if(!has_operation || (cur_r == MAX_ROUND_BACKWARD-1 && cur_iter == 0) ){ //Has no operation => cur_iter == 0, //Goes backwards if last rounds or current rounds does not process anythings. 
do{ cur_r-=1; if(cur_r == -1){ return; //NOTE: Completed computation, Base Case } cur_iter = iter_shared[cur_r]; }while(cur_iter==0); cur_thread_id = thread_id_arr[cur_r] + 1; dx_ptr = dx_ptr_shared[cur_r]; branch_sum_before_dx_ptr = branch_sum_before_dx_ptr_shared[cur_r]; *output_branch_size_thread() = 0; if (threadIdx.x==0){ *output_branch_size_block() = 0; } } else{ //Has operation and is last round and cur_iter != 0 //Repeat last rounds. cur_thread_id += 1; } } } }; //Kernel Launch preparation from here //NOTE: Branch Size is assumed to be zero... void GPU_Kenerl_t::kernel_compute(int branch_size, unsigned char* dx, unsigned char* dy, int* sbox_index, int* sbox_num, int* nb_size, float* cur_prob, int cur_r, int target_round){ cudaError_t cudaStatus; if (branch_size >1){ printf("\nInitial DX > 1 size is not supported.."); return; } cudaStatus = cudaMemcpyToSymbol(SPN_DIFF::final_dy_constant, dy, sizeof(unsigned char)*16); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpyToSymbol final_dy_constant failed!"); } cudaStatus = cudaMemcpy(device_dx, dx, sizeof(unsigned char) * 16 * branch_size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy dx failed!"); } cudaStatus = cudaMemcpy(device_sbox_index, sbox_index, sizeof(int) * MAX_AS * branch_size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy sbox_index failed!"); } // cudaStatus = cudaMemcpy(device_sbox_num, sbox_num, sizeof(int) * branch_size, cudaMemcpyHostToDevice); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMemcpy sbox_num failed!"); // } cudaStatus = cudaMemcpy(device_prob, cur_prob, sizeof(float) * branch_size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy cur_prob failed!"); } cudaStatus = cudaMemcpy(device_branch_size, nb_size, sizeof(int) * branch_size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy nb_size failed!"); } 
unsigned long long* nb_size_longlong = new unsigned long long(); *nb_size_longlong = *nb_size; cudaStatus = cudaMemcpy(device_branch_size_block, nb_size_longlong, sizeof(unsigned long long), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy nb_size failed!"); } cudaStatus = cudaMemcpy(device_branch_size_thread, nb_size_longlong, sizeof(unsigned long long), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy cur_prob failed!"); } unsigned long long* device_branch_size_block2 = (device_branch_size_block+2); cudaStatus = cudaMemcpy(device_branch_size_block2, nb_size_longlong, sizeof(unsigned long long), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy nb_size failed!"); } // Starting Kernel // int *round_to_process = new int(); // *round_to_process = target_round - cur_r; int *round_offset = new int(); *round_offset = cur_r; // int *branch_size_ptr = new int(); // *branch_size_ptr = branch_size; // int round_to_process = target_round - cur_r; // int round_offset = cur_r; bool is_MITM =true; if (is_MITM){ void** args = new void*[14]; args[0] = &device_dx; args[1] = &device_sbox_index; args[2] = &device_prob; args[3] = &device_branch_size; args[4] = &device_cluster_size_final; args[5] = &device_prob_final; args[6] = &device_last_dx_ptr; args[7] = &device_has_operation; args[8] = &device_branches_sum_before_dx; args[9] = &device_branch_size_thread; args[10] = &device_branch_size_block2; args[11] = &device_total_branch_size_block; args[12] = &MITM_prob_interm_global; args[13] = &MITM_size_interm_global; dim3 dimGrid(BLOCK_NUM, 1, 1); dim3 dimBlock(THREAD_PER_BLOCK, 1, 1); std::cout << "\nTransfered constant matsui bound from host to device"; cudaStatus = cudaMemcpyToSymbol(SPN_DIFF::CLUSTER_PROB_BOUND_const, &CLUSTER_PROB_BOUND_FORWARD, sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpyToSymbol CLUSTER_PROB_BOUND_FORWARD failed!"); 
getchar(); exit(-1); } cudaStatus = cudaLaunchCooperativeKernel((void*) kernel_diff_mitm, dimGrid, dimBlock, args); if (cudaStatus != cudaSuccess) { cudaError_t err = cudaGetLastError(); fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, cudaGetErrorName(err), cudaGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } // cudaStatus = cudaDeviceSynchronize(); // if (cudaStatus != cudaSuccess) { // cudaError_t err = cudaGetLastError(); // fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, cudaGetErrorName(err), cudaGetErrorString(err)); // std::cout << "\nExiting the program manually..."; // getchar(); // exit(-1); // } //Backwards cudaStatus = cudaMemset(device_branch_size_block+1, 0, sizeof(unsigned long long)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset device_branch_size_block+1 during backwards failed!"); } cudaStatus = cudaMemset(device_branch_size_block2+BLOCK_NUM, 0, sizeof(unsigned long long)*BLOCK_NUM ); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset device_branch_size_block2 during backwards failed!"); } cudaStatus = cudaMemset(device_branch_size_thread + GRID_THREAD_SIZE, 0, sizeof(unsigned long long)*GRID_THREAD_SIZE); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset device_branch_size_thread during backwards failed!"); } //Transfer DY part std::cout << "\nTransfered constant matsui bound from host to device"; cudaStatus = cudaMemcpyToSymbol(SPN_DIFF::CLUSTER_PROB_BOUND_const, &CLUSTER_PROB_BOUND_BACKWARD, sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpyToSymbol CLUSTER_PROB_BOUND_BACKWARD failed!"); getchar(); exit(-1); } cudaStatus = cudaMemcpy(device_dx, dy, sizeof(unsigned char) * 16 * branch_size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy dx failed!"); } *nb_size = 1; int temp_index_ptr = 0; for (int i=0;i<16;i++){ sbox_index[i] = 16; } for (int i=0;i<16;i++){ // sbox_index[i] = 
dy[i] > 0? i : 0; if(dy[i]>0){ sbox_index[temp_index_ptr] = i; *(nb_size) = *(nb_size) * (SPN_DIFF::diff_table_size_host_reversed[dy[i]]); temp_index_ptr+=1; } } *cur_prob = 1.0f; cudaStatus = cudaMemcpy(device_sbox_index, sbox_index, sizeof(int) * MAX_AS * branch_size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy sbox_index failed!"); } cudaStatus = cudaMemcpy(device_prob, cur_prob, sizeof(float) * branch_size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy cur_prob failed!"); } cudaStatus = cudaMemcpy(device_branch_size, nb_size, sizeof(int) * branch_size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy nb_size failed!"); } *nb_size_longlong = *nb_size; //Because nb_size is int, cast to long long in this case. cudaStatus = cudaMemcpy(device_branch_size_block, nb_size_longlong, sizeof(unsigned long long), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy nb_size failed!"); } cudaStatus = cudaMemcpy(device_branch_size_thread, nb_size_longlong, sizeof(unsigned long long), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy cur_prob failed!"); } // unsigned long long* device_branch_size_block2 = (device_branch_size_block+2); cudaStatus = cudaMemcpy(device_branch_size_block2, nb_size_longlong, sizeof(unsigned long long), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy nb_size failed!"); } cudaStatus = cudaLaunchCooperativeKernel((void*) kernel_diff_mitm_backward, dimGrid, dimBlock, args); if (cudaStatus != cudaSuccess) { cudaError_t err = cudaGetLastError(); fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, cudaGetErrorName(err), cudaGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { cudaError_t err = cudaGetLastError(); 
fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, cudaGetErrorName(err), cudaGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } delete args; } else{ void** args = new void*[12]; // args[0] = &round_offset; // args[1] = &round_to_process; // args[2] = &branch_size; // int* device_sbox_index2 = device_sbox_index+1; // bool* device_has_operation2 = device_has_operation + 2; // args[0] = round_offset; // args[0] = round_to_process; // args[2] = branch_size_ptr; args[0] = &device_dx; args[1] = &device_sbox_index; args[2] = &device_prob; args[3] = &device_branch_size; args[4] = &device_cluster_size_final; args[5] = &device_prob_final; args[6] = &device_last_dx_ptr; args[7] = &device_has_operation; args[8] = &device_branches_sum_before_dx; args[9] = &device_branch_size_thread; args[10] = &device_branch_size_block2; args[11] = &device_total_branch_size_block; dim3 dimGrid(BLOCK_NUM, 1, 1); dim3 dimBlock(THREAD_PER_BLOCK, 1, 1); std::cout << "\nTransfered constant matsui bound from host to device"; cudaStatus = cudaMemcpyToSymbol(SPN_DIFF::CLUSTER_PROB_BOUND_const, &CLUSTER_PROB_BOUND, sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpyToSymbol CLUSTER_PROB_BOUND_const failed!"); getchar(); exit(-1); } cudaStatus = cudaLaunchCooperativeKernel((void*) kernel_diff, dimGrid, dimBlock, args); if (cudaStatus != cudaSuccess) { cudaError_t err = cudaGetLastError(); fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, cudaGetErrorName(err), cudaGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { cudaError_t err = cudaGetLastError(); fprintf(stderr, "\nError Code %d : %s: %s .", cudaStatus, cudaGetErrorName(err), cudaGetErrorString(err)); std::cout << "\nExiting the program manually..."; getchar(); exit(-1); } delete args; } //cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, 
my_kernel, numThreads, 0); // initialize, then launch // cudaLaunchCooperativeKernel((void*)my_kernel, deviceProp.multiProcessorCount*numBlocksPerSm, numThreads, args); // dim3 dimGrid(numSms * numBlocksPerSm, 1, 1); // cudaLaunchCooperativeKernel( // const T *func, // dim3 gridDim, // dim3 blockDim, // void **args, // size_t sharedMem = 0, // cudaStream_t stream = 0 // ) } void GPU_Kenerl_t::kernel_reduction(double& gpu_prob, long long& gpu_size){ long long size_arr[GRID_THREAD_SIZE]; double prob_arr[GRID_THREAD_SIZE]; const int size = GRID_THREAD_SIZE; auto cudaStatus = cudaMemcpy(size_arr, device_cluster_size_final, sizeof(unsigned long long)* size, cudaMemcpyDeviceToHost); #ifdef CUDA_ERROR_PRINT if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy (device_cluster_size_final) failed!");\ getchar(); } #endif cudaStatus = cudaMemcpy(prob_arr, device_prob_final, sizeof(double)* size, cudaMemcpyDeviceToHost); #ifdef CUDA_ERROR_PRINT if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy (device_prob_final) failed!");\ getchar(); } #endif printf("\nBefore Reduction \t GPU_Cluster_size : %lld\t GPU_Prob : %f",gpu_size, gpu_prob); for (int i=0;i< size; i++ ){ gpu_size += size_arr[i]; gpu_prob += prob_arr[i]; } printf("\nAfter Reduction \t GPU_Cluster_size : %lld\t GPU_Prob : %f",gpu_size, gpu_prob); } //Called Once (1) @ program entry. void SPN_DIFF::init(){ int pi=0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); std::cout << "\nGPU Info :"; std::cout << "\nSM numbers: " << deviceProp.multiProcessorCount; cudaDeviceGetAttribute(&pi, cudaDevAttrCooperativeLaunch, 0); std::cout << "\nSupport Cooperative Groups (Grid): " << (pi==1? 
" True":" FALSE"); int numBlocksPerSm; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, kernel_diff, THREAD_PER_BLOCK, 0); std::cout << "\nMax Blocks Per SM : " << numBlocksPerSm; if (deviceProp.multiProcessorCount != 9){ std::cout << "\nPress enter key to conitnue"; std::cout << "\n----------\n"; getchar(); } if (pi==0){ std::cout << "\nCooperative Groups not supported on target GPU"; exit(-1); } // std::cout <<"\nInit Trifle Reverse Differential Table:{\n"; // std::cout <<"\nPRESENT Permutation:{\n"; for (int i = 0; i < 64; i++) { // if (i%16==0){ // std:: cerr <<"\n"; // } SPN_DIFF::perm_host[i] = (i / 4) + ((i % 4) * 16); // std::cout << (int) SPN_DIFF::perm_host[i]<< ","; } // std::cout << "\n}\n" ; // std::cout <<"\nPresent Permutation Reversed:{\n"; for (int i=0;i<64;i++){ SPN_DIFF::perm_host_reversed[SPN_DIFF::perm_host[i]] = i; } // for (int i=0;i<64;i++){ // std::cout << (int) SPN_DIFF::perm_host_reversed[i]<< ","; // } // std::cout << "}\n" ; //-- // std::cout <<"\n4bit Permutation LUTable * 32 (Size is 32*16*16 is 8192Bytes) :{\n"; for (int sbox_pos=0;sbox_pos<16;sbox_pos++){ for (int sbox_val=0;sbox_val<16;sbox_val++){ unsigned char dx[16] = {0}; dx[sbox_pos] = sbox_val; //Do permutation unsigned long long front_64 = 0, front_64_reversed=0; for (int i = 0; i < 16; i++) { if (dx[i] > 0) { for (int j = 0; j < 4; j++) { //Actually filtered_bit unsigned long long filtered_word = ((dx[i] & (0x1 << j)) >> j) & 0x1; if (filtered_word == 0) continue; //no point continue if zero, go to next elements int bit_pos = (SPN_DIFF::perm_host[((15 - i) * 4) + j]); int bit_pos_reversed = (SPN_DIFF::perm_host_reversed[((15 - i) * 4) + j]); front_64 |= (filtered_word << bit_pos); front_64_reversed |= (filtered_word << bit_pos_reversed); } } } //Front 64, 0-15, Back64 - 16-31 SPN_DIFF::perm_lookup_host[sbox_pos][sbox_val]=front_64; SPN_DIFF::perm_lookup_host_reversed[sbox_pos][sbox_val]=front_64_reversed; } } // std::cout << "}\n" ; };
dbb3936a906ea8c33c01609b388439d4fcb121ba.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
dbb3936a906ea8c33c01609b388439d4fcb121ba.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
d4e200a0c6cac552b655318e9a2fbd5225b92214.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_linear1d_out_frame( const int n, const accscalar_t rwidth, const bool align_corners, const PackedTensorAccessor64<scalar_t, 3> idata, PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][w1]; odata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const accscalar_t val = w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p]; odata[n][c][w2] = static_cast<scalar_t>(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_linear1d_out_frame_backward( const int n, const accscalar_t rwidth, const bool align_corners, PackedTensorAccessor64<scalar_t, 3> idata, const PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][w1]; idata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][w2]; atomicAdd(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val)); atomicAdd( &idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val)); } } } } static void upsample_linear1d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_linear1d_out_cuda", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); int output_width = output_size[0]; int nbatch = input.size(0); int channels = input.size(1); int input_width = input.size(2); upsample_1d_shape_check( input, Tensor(), nbatch, channels, input_width, output_width); output.resize_({input.size(0), input.size(1), output_width}); output.zero_(); AT_ASSERT(input_width > 0 && output_width > 0); const int num_kernels = output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_linear1d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor64<scalar_t, 3>(); auto odata = output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); hipLaunchKernelGGL(( upsample_linear1d_out_frame<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(hipGetLastError()); } static void 
upsample_linear1d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { TensorArg grad_output_arg{grad_output_, "grad_output_", 1}, grad_input_arg{grad_input, "grad_input", 2}; checkAllSameGPU( "upsample_linear1d_backward_out_cuda", {grad_output_arg, grad_input_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); TORCH_CHECK( input_size.size() == 3, "It is expected input_size equals to 3, but got size ", input_size.size()); int output_width = output_size[0]; int nbatch = input_size[0]; int channels = input_size[1]; int input_width = input_size[2]; upsample_1d_shape_check( Tensor(), grad_output_, nbatch, channels, input_width, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_({nbatch, channels, input_width}); grad_input.zero_(); const int num_kernels = output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor64<scalar_t, 3>(); auto odata = grad_output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); hipLaunchKernelGGL(( upsample_linear1d_out_frame_backward<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(hipGetLastError()); } } // namespace Tensor& upsample_linear1d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { 
upsample_linear1d_out_cuda_template( output, input, output_size, align_corners, scales); return output; } Tensor upsample_linear1d_cuda( const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_linear1d_out_cuda_template( output, input, output_size, align_corners, scales); return output; } Tensor& upsample_linear1d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales); return grad_input; } Tensor upsample_linear1d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales); return grad_input; } } // namespace native } // namespace at
d4e200a0c6cac552b655318e9a2fbd5225b92214.cu
// Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_linear1d_out_frame( const int n, const accscalar_t rwidth, const bool align_corners, const PackedTensorAccessor64<scalar_t, 3> idata, PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][w1]; odata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const accscalar_t val = w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p]; odata[n][c][w2] = static_cast<scalar_t>(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_linear1d_out_frame_backward( const int n, const accscalar_t rwidth, const bool align_corners, PackedTensorAccessor64<scalar_t, 3> idata, const PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][w1]; idata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][w2]; atomicAdd(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val)); atomicAdd( &idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val)); } } } } static void upsample_linear1d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_linear1d_out_cuda", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); int output_width = output_size[0]; int nbatch = input.size(0); int channels = input.size(1); int input_width = input.size(2); upsample_1d_shape_check( input, Tensor(), nbatch, channels, input_width, output_width); output.resize_({input.size(0), input.size(1), output_width}); output.zero_(); AT_ASSERT(input_width > 0 && output_width > 0); const int num_kernels = output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_linear1d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor64<scalar_t, 3>(); auto odata = output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); upsample_linear1d_out_frame<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } static void upsample_linear1d_backward_out_cuda_template( Tensor& grad_input, const 
Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { TensorArg grad_output_arg{grad_output_, "grad_output_", 1}, grad_input_arg{grad_input, "grad_input", 2}; checkAllSameGPU( "upsample_linear1d_backward_out_cuda", {grad_output_arg, grad_input_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); TORCH_CHECK( input_size.size() == 3, "It is expected input_size equals to 3, but got size ", input_size.size()); int output_width = output_size[0]; int nbatch = input_size[0]; int channels = input_size[1]; int input_width = input_size[2]; upsample_1d_shape_check( Tensor(), grad_output_, nbatch, channels, input_width, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_({nbatch, channels, input_width}); grad_input.zero_(); const int num_kernels = output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor64<scalar_t, 3>(); auto odata = grad_output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); upsample_linear1d_out_frame_backward<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } } // namespace Tensor& upsample_linear1d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { upsample_linear1d_out_cuda_template( output, input, output_size, align_corners, scales); return output; } Tensor upsample_linear1d_cuda( const 
Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_linear1d_out_cuda_template( output, input, output_size, align_corners, scales); return output; } Tensor& upsample_linear1d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales); return grad_input; } Tensor upsample_linear1d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales); return grad_input; } } // namespace native } // namespace at
0a3d8b921e354293ba76a4c396610d69cb452590.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, uchar4 * const greyImage, int numRows, int numCols) { //TODO (solved) //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D index // could also use blockDim.x instead of numCols //Columna, fila int index = numCols * blockIdx.x + threadIdx.x; uchar4 rgbpx = rgbaImage[index]; // greyImage[index] = (unsigned char)(0.299f * rgbpx.x + 0.587f * // rgbpx.y + 0.114f * rgbpx.z); int size_kernel = 3; int start_x = blockIdx.x; int start_y = threadIdx.x; int accumulator = 0; double red = 0.0; double green = 0.0; double blue = 0.0; int current_pixel; bool valid; uchar4 rgbpx_current_pixel; for(int i = start_x; i < (start_x + size_kernel); i++){ for(int j = start_y; j < (start_y + size_kernel); j++){ // if (i < 0 || i >= numCols) valid = false; else if (j < 0 || j >= numRows) valid = false; else valid = true; if (valid){ current_pixel = numCols * i + j; rgbpx_current_pixel = rgbaImage[current_pixel]; red += rgbpx_current_pixel.x; green += rgbpx_current_pixel.y; blue += rgbpx_current_pixel.z; // greyImage[index] = (unsigned char)(0.299f * rgbpx_current_pixel.x + // 0.587f * rgbpx_current_pixel.y + 0.114f * rgbpx_current_pixel.z); accumulator += 1; } } } if(index % 10000 == 0){ printf("Original %d: %d \n", index, rgbpx.x); rgbpx.x = red; rgbpx.y = green; rgbpx.z = blue; printf("Changed %d: %d \n",index, rgbpx.x); greyImage[index] = rgbpx; } // // // int debug = 0; // int id = index; // // // int accumulator, cornerX, cornerY, sumRGB[] = {0,0,0}; // // 
//printf("Hilo: %d\n", id); // for(int x=id;x<img.cols;x+=THREADS){ // //printf("hilo: %d, col: %d\n", id, x); // for(int y=0;y<img.rows;y++){ // //sem_wait(&semvar2); // //cout << y << endl; // // sumRGB[0]=0; // sumRGB[1]=0; // sumRGB[2]=0; // accumulator=0; // if(k%2==0){ // cornerX = x-(k-1)/2; // cornerY = y-(k-1)/2; // }else{ // cornerX = x-(k-2)/2; // cornerY = y-(k-2)/2; // } // // for(int i = 0; i < k; i++){ // for(int j = 0; j < k; j++){ // if(checkBounds(cornerX+j,cornerY+i, img.cols, img.rows)){ // Vec3b color = img.at<Vec3b>(Point(cornerX+j,cornerY+i)); // sumRGB[0] += (int) color(0); // sumRGB[1] += (int) color(1); // sumRGB[2] += (int) color(2); // accumulator++; // } // } // } // // if(accumulator == 0){ // sumRGB[0] = img.at<Vec3b>(Point(x,y))(0); // sumRGB[1] = img.at<Vec3b>(Point(x,y))(1); // sumRGB[2] = img.at<Vec3b>(Point(x,y))(2); // }else{ // sumRGB[0] = sumRGB[0] / accumulator; // sumRGB[1] = sumRGB[1] / accumulator; // sumRGB[2] = sumRGB[2] / accumulator; // } // //sem_post(&semvar2); // Vec3b color; // color(0) = sumRGB[0]; // color(1) = sumRGB[1]; // color(2) = sumRGB[2]; // #pragma omp critical // blurred.at<Vec3b>(Point(x,y)) = color; // // // } // } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, uchar4 * const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 gridSize(numRows, 1, 1); //TODO (solved) const dim3 blockSize(numCols, 1, 1); //TODO (solved) hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
0a3d8b921e354293ba76a4c396610d69cb452590.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, uchar4 * const greyImage, int numRows, int numCols) { //TODO (solved) //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D index // could also use blockDim.x instead of numCols //Columna, fila int index = numCols * blockIdx.x + threadIdx.x; uchar4 rgbpx = rgbaImage[index]; // greyImage[index] = (unsigned char)(0.299f * rgbpx.x + 0.587f * // rgbpx.y + 0.114f * rgbpx.z); int size_kernel = 3; int start_x = blockIdx.x; int start_y = threadIdx.x; int accumulator = 0; double red = 0.0; double green = 0.0; double blue = 0.0; int current_pixel; bool valid; uchar4 rgbpx_current_pixel; for(int i = start_x; i < (start_x + size_kernel); i++){ for(int j = start_y; j < (start_y + size_kernel); j++){ // if (i < 0 || i >= numCols) valid = false; else if (j < 0 || j >= numRows) valid = false; else valid = true; if (valid){ current_pixel = numCols * i + j; rgbpx_current_pixel = rgbaImage[current_pixel]; red += rgbpx_current_pixel.x; green += rgbpx_current_pixel.y; blue += rgbpx_current_pixel.z; // greyImage[index] = (unsigned char)(0.299f * rgbpx_current_pixel.x + // 0.587f * rgbpx_current_pixel.y + 0.114f * rgbpx_current_pixel.z); accumulator += 1; } } } if(index % 10000 == 0){ printf("Original %d: %d \n", index, rgbpx.x); rgbpx.x = red; rgbpx.y = green; rgbpx.z = blue; printf("Changed %d: %d \n",index, rgbpx.x); greyImage[index] = rgbpx; } // // // int debug = 0; // int id = index; // // // int accumulator, cornerX, cornerY, sumRGB[] = {0,0,0}; // // 
//printf("Hilo: %d\n", id); // for(int x=id;x<img.cols;x+=THREADS){ // //printf("hilo: %d, col: %d\n", id, x); // for(int y=0;y<img.rows;y++){ // //sem_wait(&semvar2); // //cout << y << endl; // // sumRGB[0]=0; // sumRGB[1]=0; // sumRGB[2]=0; // accumulator=0; // if(k%2==0){ // cornerX = x-(k-1)/2; // cornerY = y-(k-1)/2; // }else{ // cornerX = x-(k-2)/2; // cornerY = y-(k-2)/2; // } // // for(int i = 0; i < k; i++){ // for(int j = 0; j < k; j++){ // if(checkBounds(cornerX+j,cornerY+i, img.cols, img.rows)){ // Vec3b color = img.at<Vec3b>(Point(cornerX+j,cornerY+i)); // sumRGB[0] += (int) color(0); // sumRGB[1] += (int) color(1); // sumRGB[2] += (int) color(2); // accumulator++; // } // } // } // // if(accumulator == 0){ // sumRGB[0] = img.at<Vec3b>(Point(x,y))(0); // sumRGB[1] = img.at<Vec3b>(Point(x,y))(1); // sumRGB[2] = img.at<Vec3b>(Point(x,y))(2); // }else{ // sumRGB[0] = sumRGB[0] / accumulator; // sumRGB[1] = sumRGB[1] / accumulator; // sumRGB[2] = sumRGB[2] / accumulator; // } // //sem_post(&semvar2); // Vec3b color; // color(0) = sumRGB[0]; // color(1) = sumRGB[1]; // color(2) = sumRGB[2]; // #pragma omp critical // blurred.at<Vec3b>(Point(x,y)) = color; // // // } // } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, uchar4 * const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 gridSize(numRows, 1, 1); //TODO (solved) const dim3 blockSize(numCols, 1, 1); //TODO (solved) rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
a59f7f5075257e894ca7ea2b246483bcddab298f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "k-LiMapS.cu" #include "vectorUtility.cu" #include "MoorePenrose.cu" #include "createDict.cu" /* Questo programma esegue i test "noisy" per l'algoritmo k-LiMapS Il dizionario D e il vettore k-sparso alphaopt vengono generati in GPU a partire da una distribuzione gaussiana. La pseudoinversa viene calcolata in GPU con l'algoritmo di Jacobi per SVD. Il segnale sNoisy viene calcolato come D * alphaopt + epsilon, dove epsilon un vettore generato a partire dalla distribuzione gaussiana e scalato di un fattore pari a 10^-4. Il programma, con numero di righe della matrice n preso in in input da riga di comando, esegue diversi test per valori di m = n,...,5n e k = 10%n,15%n,...,50%n Per ogni tripla di valori n,m,k vengono eseguite 50 iterazioni, da cui vengono calcolati alcuni valori come: succ%: una stima di quante volte l'algoritmo k-LiMapS ha prodotto una soluzione approssimata alphalimaps tale che valesse 0 dove alphaopt valeva 0 e differisse al pi 0.1 altrove avgMSE: la media sulle 50 iterazioni del MeanSquareError tra D*alphaopt e D*alphalimaps avgTime: la media sulle 50 iterazioni del tempo di calcolo dell'algoritmo k-LiMapS Viene generato un nuovo dizionario per ogni valore di m, mentre alphaopt viene estratto ad ogni singola iterazione. 
*/ int main(int argc, char **argv){ if(argc != 2){ printf("usage: noisyTest <n>\n"); return 2; } setbuf(stdout, NULL); int n = atoi(argv[1]); printf(" n| m| delta| k| rho| succ%%| avgMSE | avgTime |\n"); //Alloca i puntatori alla memoria deviceS double *D,*DINV,*alphaopt,*s,*sNoisy,*epsilon,*alphalimaps,*h_alphalimaps,*h_alphaopt; CHECK(hipMalloc(&s, n*sizeof(double))); CHECK(hipMalloc(&sNoisy, n*sizeof(double))); CHECK(hipMalloc(&epsilon, n*sizeof(double))); //Alloca i puntatori per hiprand int blocks = ceil(n*1.0/BLOCK_SIZE); srand(time(NULL)); hiprandState_t *devStates; CHECK(hipMalloc(&devStates, n*sizeof(hiprandState_t))); //Crea il cublas handle hipblasHandle_t cublasHandle; CHECK_CUBLAS(hipblasCreate(&cublasHandle)); double cualpha=1,cubeta=0; //Ciclo su m da n a 5n, step n for(int m = n; m <= 5*n; m += n){ //Crea il dizionario CHECK(hipMalloc(&D, n*m*sizeof(double))); createDict(D, n, m); //Calcola la pseudoinversa CHECK(hipMalloc(&DINV, m*n*sizeof(double))); JacobiMoorePenroseInverse(D, n, m, DINV); if(!CheckPseudoinverse(D, n, m, DINV)) printf("Something went wrong with the Moore-Penrose pseudoinverse!\n"); //Alloca alphaopt e alphalimaps CHECK(hipMalloc(&alphaopt, m*sizeof(double))); CHECK(hipMalloc(&alphalimaps, m*sizeof(double))); CHECK(hipHostMalloc(&h_alphaopt, m*sizeof(double))); CHECK(hipHostMalloc(&h_alphalimaps, m*sizeof(double))); //Ciclo su k dal 10% di n al 50%, step 5% for(int l = 10; l<=50; l+=5){ int k = n*l/100.0; int iters; int succ = 0; double avgMSE = 0; double avgTime = 0; //n, m, delta, k, rho printf("%5d| %5d| %5.2f| %5d| %5.2f| ", n, m, n*1.0/m, k, k*1.0/n); for(iters=0; iters<50; iters++){ //Genera alphaopt generateAlpha(alphaopt, m, k); //Calcola sNoisy = D * alphaopt + epsilon (salvando s per il calcolo del MSE) CHECK_CUBLAS(hipblasDgemv(cublasHandle, HIPBLAS_OP_N, n, m, &cualpha, D, n, alphaopt, 1, &cubeta, s, 1)); hipLaunchKernelGGL(( normfill), dim3(blocks),dim3(BLOCK_SIZE), 0, 0, epsilon, n, devStates, rand()); 
hipDeviceSynchronize(); //Scala epsilon di un fattore 10^-4 hipLaunchKernelGGL(( divide), dim3(blocks),dim3(BLOCK_SIZE), 0, 0, epsilon, 10000, n); hipDeviceSynchronize(); hipLaunchKernelGGL(( vectorSum), dim3(blocks),dim3(BLOCK_SIZE), 0, 0, 1, s, 1, epsilon, sNoisy, n); //Chiama K_LiMapS double t=seconds(); k_LiMapS(k, D, n, m, DINV, sNoisy, alphalimaps, 1000); avgTime += seconds() - t; //Check del risultato (succ%) CHECK(hipMemcpy(h_alphaopt, alphaopt, m*sizeof(double), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(h_alphalimaps, alphalimaps, m*sizeof(double), hipMemcpyDeviceToHost)); int i; for(i=0; i<m; i++) if(h_alphaopt[i] == 0 && h_alphalimaps[i] != 0 || h_alphaopt[i] != 0 && fabs(h_alphaopt[i] - h_alphalimaps[i]) > 0.1) break; if(i == m) succ++; //Calcola MSE avgMSE += MSE(s,D,alphalimaps,n,m); } avgMSE /= iters; avgTime /= iters; //succ, avgMSE, avgTime printf("%6.2f| %17.15f| %8.6f|\n", succ*100.0/50, avgMSE, avgTime); } CHECK(hipFree(D)); CHECK(hipFree(DINV)); CHECK(hipFree(alphaopt)); CHECK(hipFree(alphalimaps)); CHECK(hipHostFree(h_alphaopt)); CHECK(hipHostFree(h_alphalimaps)); } CHECK(hipFree(s)); CHECK(hipFree(epsilon)); CHECK(hipFree(sNoisy)); return 0; }
a59f7f5075257e894ca7ea2b246483bcddab298f.cu
#include "k-LiMapS.cu" #include "vectorUtility.cu" #include "MoorePenrose.cu" #include "createDict.cu" /* Questo programma esegue i test "noisy" per l'algoritmo k-LiMapS Il dizionario D e il vettore k-sparso alphaopt vengono generati in GPU a partire da una distribuzione gaussiana. La pseudoinversa viene calcolata in GPU con l'algoritmo di Jacobi per SVD. Il segnale sNoisy viene calcolato come D * alphaopt + epsilon, dove epsilon è un vettore generato a partire dalla distribuzione gaussiana e scalato di un fattore pari a 10^-4. Il programma, con numero di righe della matrice n preso in in input da riga di comando, esegue diversi test per valori di m = n,...,5n e k = 10%n,15%n,...,50%n Per ogni tripla di valori n,m,k vengono eseguite 50 iterazioni, da cui vengono calcolati alcuni valori come: succ%: una stima di quante volte l'algoritmo k-LiMapS ha prodotto una soluzione approssimata alphalimaps tale che valesse 0 dove alphaopt valeva 0 e differisse al più 0.1 altrove avgMSE: la media sulle 50 iterazioni del MeanSquareError tra D*alphaopt e D*alphalimaps avgTime: la media sulle 50 iterazioni del tempo di calcolo dell'algoritmo k-LiMapS Viene generato un nuovo dizionario per ogni valore di m, mentre alphaopt viene estratto ad ogni singola iterazione. 
*/ int main(int argc, char **argv){ if(argc != 2){ printf("usage: noisyTest <n>\n"); return 2; } setbuf(stdout, NULL); int n = atoi(argv[1]); printf(" n| m| delta| k| rho| succ%%| avgMSE | avgTime |\n"); //Alloca i puntatori alla memoria deviceS double *D,*DINV,*alphaopt,*s,*sNoisy,*epsilon,*alphalimaps,*h_alphalimaps,*h_alphaopt; CHECK(cudaMalloc(&s, n*sizeof(double))); CHECK(cudaMalloc(&sNoisy, n*sizeof(double))); CHECK(cudaMalloc(&epsilon, n*sizeof(double))); //Alloca i puntatori per curand int blocks = ceil(n*1.0/BLOCK_SIZE); srand(time(NULL)); curandState *devStates; CHECK(cudaMalloc(&devStates, n*sizeof(curandState))); //Crea il cublas handle cublasHandle_t cublasHandle; CHECK_CUBLAS(cublasCreate(&cublasHandle)); double cualpha=1,cubeta=0; //Ciclo su m da n a 5n, step n for(int m = n; m <= 5*n; m += n){ //Crea il dizionario CHECK(cudaMalloc(&D, n*m*sizeof(double))); createDict(D, n, m); //Calcola la pseudoinversa CHECK(cudaMalloc(&DINV, m*n*sizeof(double))); JacobiMoorePenroseInverse(D, n, m, DINV); if(!CheckPseudoinverse(D, n, m, DINV)) printf("Something went wrong with the Moore-Penrose pseudoinverse!\n"); //Alloca alphaopt e alphalimaps CHECK(cudaMalloc(&alphaopt, m*sizeof(double))); CHECK(cudaMalloc(&alphalimaps, m*sizeof(double))); CHECK(cudaMallocHost(&h_alphaopt, m*sizeof(double))); CHECK(cudaMallocHost(&h_alphalimaps, m*sizeof(double))); //Ciclo su k dal 10% di n al 50%, step 5% for(int l = 10; l<=50; l+=5){ int k = n*l/100.0; int iters; int succ = 0; double avgMSE = 0; double avgTime = 0; //n, m, delta, k, rho printf("%5d| %5d| %5.2f| %5d| %5.2f| ", n, m, n*1.0/m, k, k*1.0/n); for(iters=0; iters<50; iters++){ //Genera alphaopt generateAlpha(alphaopt, m, k); //Calcola sNoisy = D * alphaopt + epsilon (salvando s per il calcolo del MSE) CHECK_CUBLAS(cublasDgemv(cublasHandle, CUBLAS_OP_N, n, m, &cualpha, D, n, alphaopt, 1, &cubeta, s, 1)); normfill<<<blocks,BLOCK_SIZE>>>(epsilon, n, devStates, rand()); cudaDeviceSynchronize(); //Scala epsilon di un 
fattore 10^-4 divide<<<blocks,BLOCK_SIZE>>>(epsilon, 10000, n); cudaDeviceSynchronize(); vectorSum<<<blocks,BLOCK_SIZE>>>(1, s, 1, epsilon, sNoisy, n); //Chiama K_LiMapS double t=seconds(); k_LiMapS(k, D, n, m, DINV, sNoisy, alphalimaps, 1000); avgTime += seconds() - t; //Check del risultato (succ%) CHECK(cudaMemcpy(h_alphaopt, alphaopt, m*sizeof(double), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(h_alphalimaps, alphalimaps, m*sizeof(double), cudaMemcpyDeviceToHost)); int i; for(i=0; i<m; i++) if(h_alphaopt[i] == 0 && h_alphalimaps[i] != 0 || h_alphaopt[i] != 0 && fabs(h_alphaopt[i] - h_alphalimaps[i]) > 0.1) break; if(i == m) succ++; //Calcola MSE avgMSE += MSE(s,D,alphalimaps,n,m); } avgMSE /= iters; avgTime /= iters; //succ, avgMSE, avgTime printf("%6.2f| %17.15f| %8.6f|\n", succ*100.0/50, avgMSE, avgTime); } CHECK(cudaFree(D)); CHECK(cudaFree(DINV)); CHECK(cudaFree(alphaopt)); CHECK(cudaFree(alphalimaps)); CHECK(cudaFreeHost(h_alphaopt)); CHECK(cudaFreeHost(h_alphalimaps)); } CHECK(cudaFree(s)); CHECK(cudaFree(epsilon)); CHECK(cudaFree(sNoisy)); return 0; }
07495edbfc7c47d33e66d44d7d346d3349cdf126.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //GPU Programming - Project //Name: Cavaughn Browne //Parallel Programming Date: 12/5/2016 //Reads N sets of data from a file called data.dat and processes them using //the FFT-Cooley Tukey Algorithm. //compile with these lines with the data.dat file in the same directory //module load cuda //nvcc -arch=compute_35 -code=sm_35 CooleyTukeyImplementationCuda.cu -o a.out #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define PI 3.141592653589793 //A ComplexNum consists of a two doubles: real and imag struct ComplexNum { double real; double imag; }; void FFT(ComplexNum* signals, int N); __global__ void FFT_Kernel(ComplexNum* signals_d, ComplexNum* XkResults, int N); int main(int argc, char **argv) { FILE *fp; fp = fopen("data.dat", "r"); //Takes the first argument from stdin and stores it in N int N = atoi(argv[1]); int j; struct ComplexNum x[N]; //to store the signals in the file j = 0; //Read from the file while j < N and not the end of the file while(j < N && !feof(fp)) { fscanf(fp, "%lf", &x[j].real); fscanf(fp, "%lf", &x[j].imag); j++; } //fill the rest of the array with 0(s) if j < N if(j < N) { for(; j < N; j++) { x[j].real = 0; x[j].imag = 0; } } printf("TOTAL PROCESSED SAMPLES: %d\n", N); //calculate the FFT FFT(x, N); } /* Invokes the FFT_Kernel to calculate the FFT of the "signals" of number N. After the first 8 results are printed. 
*/ void FFT(ComplexNum* signals, int N) { int size = N * 2* sizeof(double); int threads; int blocks; ComplexNum* signals_d; ComplexNum* XkResults_d; ComplexNum XkResults_h[N]; float time, commTime, commTime2; hipEvent_t start, stop, start2, stop2; //For timing clock_t st, diff, st2, diff2; //calculate the number of blocks and threads to use if(N < 1024) { threads = N % 1024; blocks = N/threads; } else if(N % 1024 == 0) { threads = 1024; blocks = N/threads; } else { threads = 1024; blocks = (N/threads) + 1; } //Cuda Time hipEventCreate(&start2); //Creates the start2 time event hipEventCreate(&stop2) ; //Creates the stop2 time event hipEventRecord(start2, 0) ; //Records the start2 time //C-time //store the current clock time in st st2 = clock(); //Memory Allocation and Transfer hipMalloc((void**)&signals_d, size); hipMemcpy(signals_d, signals, size, hipMemcpyHostToDevice); hipMalloc((void**)&XkResults_d, size); //End of timing period //C-time //find the difference between the st clock time and current clock time diff2 = clock() - st2; //Cuda-time hipEventRecord(stop2, 0); //Records the stop time hipEventSynchronize(stop2); //the elapsed time between start and stop is stored in time in //milliseconds hipEventElapsedTime(&commTime, start2, stop2); hipEventDestroy(start2); hipEventDestroy(stop2); //New Timing period //Cuda-time hipEventCreate(&start); //Creates the start time event hipEventCreate(&stop) ; //Creates the stop time event hipEventRecord(start, 0) ; //Records the start time //C-time //store the current clock time in st st = clock(); hipLaunchKernelGGL(( FFT_Kernel), dim3(blocks), dim3(threads) , 0, 0, signals_d, XkResults_d, N); //C-time //find the difference between the st clock time and current clock time diff = clock() - st; //Cuda-time hipEventRecord(stop, 0); //Records the stop time hipEventSynchronize(stop); //the elapsed time between start and stop is stored in time in //milliseconds hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); 
hipEventDestroy(stop); //Start timing again //Cuda Time hipEventCreate(&start2); //Creates the start2 time event hipEventCreate(&stop2) ; //Creates the stop2 time event hipEventRecord(start2, 0) ; //Records the start2 time //C-time //store the current clock time in st st2 = clock(); //copy the results from the device back to the host hipMemcpy(XkResults_h, XkResults_d, size, hipMemcpyDeviceToHost); //Free memory on device hipFree(signals_d); hipFree(XkResults_d); //End of timing period //C-time //find the difference between the st clock time and current clock time diff2 += (clock() - st2); //Cuda-time hipEventRecord(stop2, 0); //Records the stop time hipEventSynchronize(stop2); //the elapsed time between start and stop is stored in time in //milliseconds hipEventElapsedTime(&commTime2, start2, stop2); hipEventDestroy(start2); hipEventDestroy(stop2); //prints the first 8 int p; for (p = 0; p < 8; p++) { printf("XR[%d] : %lf\nXI[%d] : %lf\n", p, XkResults_h[p].real, p, XkResults_h[p].imag); } printf("Cuda Communication Time: %3.1f ms \n", commTime + commTime2); printf("C Communication Time: %3.1f ms \n\n", diff2); printf("Cuda Calculation Time: %3.1f ms \n", time); printf("C Calculation Time: %3.1f ms \n", diff); } /* Each thread calculates one Xk result by summing up even and odd parts for m = 0 to m < N/2 of the FFT equation */ __global__ void FFT_Kernel(ComplexNum* signals_d, ComplexNum* XkResults, int N) { struct ComplexNum Xk; struct ComplexNum evenP; struct ComplexNum oddP; double c, s, realPart, imgPart; int m, k; //thread will compute Xk....k = threadIdx.x k = blockIdx.x * blockDim.x + threadIdx.x; double theta = (-2 * PI * k) / (N / 2); if(k < N) { evenP.real = 0; evenP.imag = 0; oddP.real = 0; oddP.imag = 0; for ( m = 0; m < N / 2; m++) { c = cos(theta * m); s = sin(theta * m); //Even Index Part computation realPart = (signals_d[2 * m].real *c) - ((signals_d[2 * m].imag * s)); evenP.real += realPart; imgPart = (signals_d[2 * m].real *s) + ((signals_d[2 * 
m].imag * c)); evenP.imag += imgPart; //Odd Index Part Computation realPart = (signals_d[(2 * m) + 1].real *c) - ((signals_d[(2 * m) + 1].imag * s)); oddP.real += realPart; imgPart = (signals_d[(2 * m) + 1].real *s) + ((signals_d[(2 * m) + 1].imag * c)); oddP.imag += imgPart; } //Add the real and the odd part sums and store the result. Xk.real = evenP.real + (cos(theta / 2) * oddP.real) - (sin(theta / 2) * oddP.imag); Xk.imag = evenP.imag + (cos(theta / 2) * oddP.imag) + (sin(theta / 2) * oddP.real); XkResults[k] = Xk; } }
07495edbfc7c47d33e66d44d7d346d3349cdf126.cu
//GPU Programming - Project //Name: Cavaughn Browne //Parallel Programming Date: 12/5/2016 //Reads N sets of data from a file called data.dat and processes them using //the FFT-Cooley Tukey Algorithm. //compile with these lines with the data.dat file in the same directory //module load cuda //nvcc -arch=compute_35 -code=sm_35 CooleyTukeyImplementationCuda.cu -o a.out #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define PI 3.141592653589793 //A ComplexNum consists of a two doubles: real and imag struct ComplexNum { double real; double imag; }; void FFT(ComplexNum* signals, int N); __global__ void FFT_Kernel(ComplexNum* signals_d, ComplexNum* XkResults, int N); int main(int argc, char **argv) { FILE *fp; fp = fopen("data.dat", "r"); //Takes the first argument from stdin and stores it in N int N = atoi(argv[1]); int j; struct ComplexNum x[N]; //to store the signals in the file j = 0; //Read from the file while j < N and not the end of the file while(j < N && !feof(fp)) { fscanf(fp, "%lf", &x[j].real); fscanf(fp, "%lf", &x[j].imag); j++; } //fill the rest of the array with 0(s) if j < N if(j < N) { for(; j < N; j++) { x[j].real = 0; x[j].imag = 0; } } printf("TOTAL PROCESSED SAMPLES: %d\n", N); //calculate the FFT FFT(x, N); } /* Invokes the FFT_Kernel to calculate the FFT of the "signals" of number N. After the first 8 results are printed. 
*/ void FFT(ComplexNum* signals, int N) { int size = N * 2* sizeof(double); int threads; int blocks; ComplexNum* signals_d; ComplexNum* XkResults_d; ComplexNum XkResults_h[N]; float time, commTime, commTime2; cudaEvent_t start, stop, start2, stop2; //For timing clock_t st, diff, st2, diff2; //calculate the number of blocks and threads to use if(N < 1024) { threads = N % 1024; blocks = N/threads; } else if(N % 1024 == 0) { threads = 1024; blocks = N/threads; } else { threads = 1024; blocks = (N/threads) + 1; } //Cuda Time cudaEventCreate(&start2); //Creates the start2 time event cudaEventCreate(&stop2) ; //Creates the stop2 time event cudaEventRecord(start2, 0) ; //Records the start2 time //C-time //store the current clock time in st st2 = clock(); //Memory Allocation and Transfer cudaMalloc((void**)&signals_d, size); cudaMemcpy(signals_d, signals, size, cudaMemcpyHostToDevice); cudaMalloc((void**)&XkResults_d, size); //End of timing period //C-time //find the difference between the st clock time and current clock time diff2 = clock() - st2; //Cuda-time cudaEventRecord(stop2, 0); //Records the stop time cudaEventSynchronize(stop2); //the elapsed time between start and stop is stored in time in //milliseconds cudaEventElapsedTime(&commTime, start2, stop2); cudaEventDestroy(start2); cudaEventDestroy(stop2); //New Timing period //Cuda-time cudaEventCreate(&start); //Creates the start time event cudaEventCreate(&stop) ; //Creates the stop time event cudaEventRecord(start, 0) ; //Records the start time //C-time //store the current clock time in st st = clock(); FFT_Kernel<<< blocks, threads >>>(signals_d, XkResults_d, N); //C-time //find the difference between the st clock time and current clock time diff = clock() - st; //Cuda-time cudaEventRecord(stop, 0); //Records the stop time cudaEventSynchronize(stop); //the elapsed time between start and stop is stored in time in //milliseconds cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); 
cudaEventDestroy(stop); //Start timing again //Cuda Time cudaEventCreate(&start2); //Creates the start2 time event cudaEventCreate(&stop2) ; //Creates the stop2 time event cudaEventRecord(start2, 0) ; //Records the start2 time //C-time //store the current clock time in st st2 = clock(); //copy the results from the device back to the host cudaMemcpy(XkResults_h, XkResults_d, size, cudaMemcpyDeviceToHost); //Free memory on device cudaFree(signals_d); cudaFree(XkResults_d); //End of timing period //C-time //find the difference between the st clock time and current clock time diff2 += (clock() - st2); //Cuda-time cudaEventRecord(stop2, 0); //Records the stop time cudaEventSynchronize(stop2); //the elapsed time between start and stop is stored in time in //milliseconds cudaEventElapsedTime(&commTime2, start2, stop2); cudaEventDestroy(start2); cudaEventDestroy(stop2); //prints the first 8 int p; for (p = 0; p < 8; p++) { printf("XR[%d] : %lf\nXI[%d] : %lf\n", p, XkResults_h[p].real, p, XkResults_h[p].imag); } printf("Cuda Communication Time: %3.1f ms \n", commTime + commTime2); printf("C Communication Time: %3.1f ms \n\n", diff2); printf("Cuda Calculation Time: %3.1f ms \n", time); printf("C Calculation Time: %3.1f ms \n", diff); } /* Each thread calculates one Xk result by summing up even and odd parts for m = 0 to m < N/2 of the FFT equation */ __global__ void FFT_Kernel(ComplexNum* signals_d, ComplexNum* XkResults, int N) { struct ComplexNum Xk; struct ComplexNum evenP; struct ComplexNum oddP; double c, s, realPart, imgPart; int m, k; //thread will compute Xk....k = threadIdx.x k = blockIdx.x * blockDim.x + threadIdx.x; double theta = (-2 * PI * k) / (N / 2); if(k < N) { evenP.real = 0; evenP.imag = 0; oddP.real = 0; oddP.imag = 0; for ( m = 0; m < N / 2; m++) { c = cos(theta * m); s = sin(theta * m); //Even Index Part computation realPart = (signals_d[2 * m].real *c) - ((signals_d[2 * m].imag * s)); evenP.real += realPart; imgPart = (signals_d[2 * m].real *s) + 
((signals_d[2 * m].imag * c)); evenP.imag += imgPart; //Odd Index Part Computation realPart = (signals_d[(2 * m) + 1].real *c) - ((signals_d[(2 * m) + 1].imag * s)); oddP.real += realPart; imgPart = (signals_d[(2 * m) + 1].real *s) + ((signals_d[(2 * m) + 1].imag * c)); oddP.imag += imgPart; } //Add the real and the odd part sums and store the result. Xk.real = evenP.real + (cos(theta / 2) * oddP.real) - (sin(theta / 2) * oddP.imag); Xk.imag = evenP.imag + (cos(theta / 2) * oddP.imag) + (sin(theta / 2) * oddP.real); XkResults[k] = Xk; } }
4d5e43b504a5146eef08664d1aae8db114392b37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) int ceil_div_2(int a, int b){ return (a + b - 1) / b; } template <typename T> __global__ void RoIPoolFForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* bottom_rois, T* top_data, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * 
bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero T maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (offset_bottom_data[bottom_index] > maxval) { maxval = offset_bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename T> __global__ void RoIPoolFBackward(const int nthreads, const T* top_diff, const int* argmax_data, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; int bottom_offset = (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; T* offset_bottom_diff = bottom_diff + bottom_offset; const int* offset_argmax_data = argmax_data + top_offset; int argmax = offset_argmax_data[ph * pooled_width + pw]; if (argmax != -1) { atomicAdd( offset_bottom_diff + argmax, static_cast<T>(offset_top_diff[ph 
* pooled_width + pw])); } } } std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; auto argmax = at::zeros({num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // dim3 grid(::min(THCCeilDiv(output_size, 512L), 4096L)); dim3 grid(::min(ceil_div_2((int)output_size, 512L), 4096)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(hipGetLastError()); return std::make_tuple(output, argmax); } AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIPool_forward", [&] { hipLaunchKernelGGL(( RoIPoolFForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois.contiguous().data<scalar_t>(), output.data<scalar_t>(), argmax.data<int>()); }); THCudaCheck(hipGetLastError()); return std::make_tuple(output, argmax); } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor ROIPool_backward_cuda(const at::Tensor& grad, const at::Tensor& input, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); // TODO add more checks auto 
num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // dim3 grid(::min(THCCeilDiv(grad.numel(), 512L), 4096L)); dim3 grid(::min(ceil_div_2((int)grad.numel(), 512L), 4096)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(hipGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIPool_backward", [&] { hipLaunchKernelGGL(( RoIPoolFBackward<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.contiguous().data<scalar_t>(), argmax.data<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return grad_input; }
4d5e43b504a5146eef08664d1aae8db114392b37.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) int ceil_div_2(int a, int b){ return (a + b - 1) / b; } template <typename T> __global__ void RoIPoolFForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* bottom_rois, T* top_data, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + 
roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero T maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (offset_bottom_data[bottom_index] > maxval) { maxval = offset_bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename T> __global__ void RoIPoolFBackward(const int nthreads, const T* top_diff, const int* argmax_data, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; int bottom_offset = (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; T* offset_bottom_diff = bottom_diff + bottom_offset; const int* offset_argmax_data = argmax_data + top_offset; int argmax = offset_argmax_data[ph * pooled_width + pw]; if (argmax != -1) { atomicAdd( offset_bottom_diff + argmax, static_cast<T>(offset_top_diff[ph * pooled_width + pw])); } } } std::tuple<at::Tensor, at::Tensor> 
ROIPool_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; auto argmax = at::zeros({num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // dim3 grid(std::min(THCCeilDiv(output_size, 512L), 4096L)); dim3 grid(std::min(ceil_div_2((int)output_size, 512L), 4096)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return std::make_tuple(output, argmax); } AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIPool_forward", [&] { RoIPoolFForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois.contiguous().data<scalar_t>(), output.data<scalar_t>(), argmax.data<int>()); }); THCudaCheck(cudaGetLastError()); return std::make_tuple(output, argmax); } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor ROIPool_backward_cuda(const at::Tensor& grad, const at::Tensor& input, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); // TODO add more checks auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, 
grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // dim3 grid(std::min(THCCeilDiv(grad.numel(), 512L), 4096L)); dim3 grid(std::min(ceil_div_2((int)grad.numel(), 512L), 4096)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(cudaGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIPool_backward", [&] { RoIPoolFBackward<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad.contiguous().data<scalar_t>(), argmax.data<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return grad_input; }
884661b121feb413bcd568f5f4952e677c82312e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <algorithm> using namespace std; #define N 4096 #define RADIUS 3 #define BLOCK_SIZE 16 __global__ void stencil_1d(int *in, int *out) { __shared__ int temp[BLOCK_SIZE + 2*RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex]; if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = in[gindex - RADIUS]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE]; } // Synchronize (ensure all the data is available) __syncthreads(); // Apply the stencil int result = 0; for (int offset = -RADIUS; offset <= RADIUS; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; } void fill_ints(int *x, int n) { fill_n(x, n, 1); } int main(void) { int *in, *out; // host copies of a, b, c int *d_in, *d_out; // device copies of a, b, c // Alloc space for host copies and setup values int size = (N+2*RADIUS) * sizeof(int); in = (int *)malloc(size); fill_ints(in, N + 2*RADIUS); out = (int *)malloc(size); fill_ints(out, N + 2*RADIUS); // Alloc space for device copies hipMalloc((void **)&d_in, size); hipMalloc((void **)&d_out, size); // Copy to device hipMemcpy(d_in, in, size, hipMemcpyHostToDevice); hipMemcpy(d_out, out, size, hipMemcpyHostToDevice); // Launch stencil_1d() kernel on GPU hipLaunchKernelGGL(( stencil_1d), dim3(N/BLOCK_SIZE),dim3(BLOCK_SIZE), 0, 0, d_in + RADIUS, d_out + RADIUS); // Copy result back to host hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost); // Error Checking for (int i = 0; i < N + 2*RADIUS; i++) { if (i<RADIUS || i>=N+RADIUS){ if (out[i] != 1) printf("Mismatch at index %d, was: %d, should be: %d\n", i, out[i], 1); } else { if (out[i] != 1 + 2*RADIUS) printf("Mismatch at index %d, was: %d, should be: %d\n", i, out[i], 1 + 2*RADIUS); } } // Cleanup free(in); free(out); hipFree(d_in); hipFree(d_out); 
printf("Success!\n"); return 0; }
884661b121feb413bcd568f5f4952e677c82312e.cu
#include <stdio.h> #include <algorithm> using namespace std; #define N 4096 #define RADIUS 3 #define BLOCK_SIZE 16 __global__ void stencil_1d(int *in, int *out) { __shared__ int temp[BLOCK_SIZE + 2*RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex]; if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = in[gindex - RADIUS]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE]; } // Synchronize (ensure all the data is available) __syncthreads(); // Apply the stencil int result = 0; for (int offset = -RADIUS; offset <= RADIUS; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; } void fill_ints(int *x, int n) { fill_n(x, n, 1); } int main(void) { int *in, *out; // host copies of a, b, c int *d_in, *d_out; // device copies of a, b, c // Alloc space for host copies and setup values int size = (N+2*RADIUS) * sizeof(int); in = (int *)malloc(size); fill_ints(in, N + 2*RADIUS); out = (int *)malloc(size); fill_ints(out, N + 2*RADIUS); // Alloc space for device copies cudaMalloc((void **)&d_in, size); cudaMalloc((void **)&d_out, size); // Copy to device cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); cudaMemcpy(d_out, out, size, cudaMemcpyHostToDevice); // Launch stencil_1d() kernel on GPU stencil_1d<<<N/BLOCK_SIZE,BLOCK_SIZE>>>(d_in + RADIUS, d_out + RADIUS); // Copy result back to host cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); // Error Checking for (int i = 0; i < N + 2*RADIUS; i++) { if (i<RADIUS || i>=N+RADIUS){ if (out[i] != 1) printf("Mismatch at index %d, was: %d, should be: %d\n", i, out[i], 1); } else { if (out[i] != 1 + 2*RADIUS) printf("Mismatch at index %d, was: %d, should be: %d\n", i, out[i], 1 + 2*RADIUS); } } // Cleanup free(in); free(out); cudaFree(d_in); cudaFree(d_out); printf("Success!\n"); return 0; }
5eeca77cfe960408f1e08a53ce13041b4924f772.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2011-2015 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "dropout_layer_updater_cuda.h" #include "../dropout_layer.h" #include "neural_network_curand_exception.h" #include "util_cuda.h" namespace nnforge { namespace cuda { __global__ void dropout_upd_kernel( const float4 * __restrict input, float4 * __restrict output, const float4 * __restrict uniform_random, // (0.0,1.0] float mult, float keep_rate, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; float4 rnd = uniform_random[elem_id]; val.x *= (rnd.x <= keep_rate ? mult : 0.0F); val.y *= (rnd.y <= keep_rate ? mult : 0.0F); val.z *= (rnd.z <= keep_rate ? mult : 0.0F); val.w *= (rnd.w <= keep_rate ? mult : 0.0F); output[elem_id] = val; } } __global__ void dropout_backprop_upd_kernel( float4 * __restrict errors, const float4 * __restrict uniform_random, // (0.0,1.0] float mult, float keep_rate, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = errors[elem_id]; float4 rnd = uniform_random[elem_id]; val.x *= (rnd.x <= keep_rate ? mult : 0.0F); val.y *= (rnd.y <= keep_rate ? mult : 0.0F); val.z *= (rnd.z <= keep_rate ? mult : 0.0F); val.w *= (rnd.w <= keep_rate ? 
mult : 0.0F); errors[elem_id] = val; } } dropout_layer_updater_cuda::dropout_layer_updater_cuda() { } dropout_layer_updater_cuda::~dropout_layer_updater_cuda() { } void dropout_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { if (offset_input_entry_id > 0) throw neural_network_exception("dropout_layer_updater_cuda is not able to run using offset"); if (force_deterministic) { cuda_util::copy_buffer(*cuda_config, *input_neurons_buffer, *output_neurons_buffer, input_elem_count_per_entry * entry_count, stream_id); } else { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); curand_safe_call(hiprandSetStream(cuda_config->get_curand_generator(), stream_id)); curand_safe_call(hiprandGenerateUniform(cuda_config->get_curand_generator(), *additional_buffers[0], elem_count * 4)); hipLaunchKernelGGL(( dropout_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_neurons_buffer, *output_neurons_buffer, *additional_buffers[0], mult, keep_rate, elem_count); } } void dropout_layer_updater_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, 
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { if (!force_deterministic) { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( dropout_backprop_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], mult, keep_rate, elem_count); } } std::vector<size_t> dropout_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const { std::vector<size_t> res; res.push_back(output_elem_count_per_entry * sizeof(float)); return res; } void dropout_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const dropout_layer> layer_derived = nnforge_dynamic_pointer_cast<const dropout_layer>(layer_schema); dropout_rate = layer_derived->dropout_rate; keep_rate = 1.0F - dropout_rate; mult = 1.0F / keep_rate; } bool dropout_layer_updater_cuda::is_in_place_backprop() const { return true; } } }
5eeca77cfe960408f1e08a53ce13041b4924f772.cu
/* * Copyright 2011-2015 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "dropout_layer_updater_cuda.h" #include "../dropout_layer.h" #include "neural_network_curand_exception.h" #include "util_cuda.h" namespace nnforge { namespace cuda { __global__ void dropout_upd_kernel( const float4 * __restrict input, float4 * __restrict output, const float4 * __restrict uniform_random, // (0.0,1.0] float mult, float keep_rate, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; float4 rnd = uniform_random[elem_id]; val.x *= (rnd.x <= keep_rate ? mult : 0.0F); val.y *= (rnd.y <= keep_rate ? mult : 0.0F); val.z *= (rnd.z <= keep_rate ? mult : 0.0F); val.w *= (rnd.w <= keep_rate ? mult : 0.0F); output[elem_id] = val; } } __global__ void dropout_backprop_upd_kernel( float4 * __restrict errors, const float4 * __restrict uniform_random, // (0.0,1.0] float mult, float keep_rate, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = errors[elem_id]; float4 rnd = uniform_random[elem_id]; val.x *= (rnd.x <= keep_rate ? mult : 0.0F); val.y *= (rnd.y <= keep_rate ? mult : 0.0F); val.z *= (rnd.z <= keep_rate ? mult : 0.0F); val.w *= (rnd.w <= keep_rate ? 
mult : 0.0F); errors[elem_id] = val; } } dropout_layer_updater_cuda::dropout_layer_updater_cuda() { } dropout_layer_updater_cuda::~dropout_layer_updater_cuda() { } void dropout_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { if (offset_input_entry_id > 0) throw neural_network_exception("dropout_layer_updater_cuda is not able to run using offset"); if (force_deterministic) { cuda_util::copy_buffer(*cuda_config, *input_neurons_buffer, *output_neurons_buffer, input_elem_count_per_entry * entry_count, stream_id); } else { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); curand_safe_call(curandSetStream(cuda_config->get_curand_generator(), stream_id)); curand_safe_call(curandGenerateUniform(cuda_config->get_curand_generator(), *additional_buffers[0], elem_count * 4)); dropout_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_neurons_buffer, *output_neurons_buffer, *additional_buffers[0], mult, keep_rate, elem_count); } } void dropout_layer_updater_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr 
output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { if (!force_deterministic) { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); dropout_backprop_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_errors_buffer, *additional_buffers[0], mult, keep_rate, elem_count); } } std::vector<size_t> dropout_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const { std::vector<size_t> res; res.push_back(output_elem_count_per_entry * sizeof(float)); return res; } void dropout_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const dropout_layer> layer_derived = nnforge_dynamic_pointer_cast<const dropout_layer>(layer_schema); dropout_rate = layer_derived->dropout_rate; keep_rate = 1.0F - dropout_rate; mult = 1.0F / keep_rate; } bool dropout_layer_updater_cuda::is_in_place_backprop() const { return true; } } }
e82e2a3a520118675daacace49c363353bda0105.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <exception> #include <map> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <thrust/count.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <utilities/error_utils.hpp> #include "nvstrings/NVStrings.h" #include "./NVStringsImpl.h" #include "../custring_view.cuh" #include "../util.h" // used to index values in a timeparts array #define TP_YEAR 0 #define TP_MONTH 1 #define TP_DAY 2 #define TP_HOUR 3 #define TP_MINUTE 4 #define TP_SECOND 5 #define TP_SUBSECOND 6 #define TP_TZ_MINUTES 7 #define TP_ARRAYSIZE 8 struct DTFormatItem { bool item_type; // 1=specifier, 0=literal char specifier; // specifier short length; // item length in bytes char literal; // pass-thru character // this should be a Char static DTFormatItem new_specifier(char fmt, short len) { DTFormatItem item{true,fmt,len,0}; return item; } static DTFormatItem new_delimiter(char ch) { DTFormatItem item{false,0,1,ch}; return item; } }; struct DTProgram { size_t count; DTFormatItem* items; }; struct DTFormatCompiler { std::vector<DTFormatItem> items; const char* format; size_t length; std::string template_string; NVStrings::timestamp_units units; DTProgram* d_prog; DTFormatItem* d_items; std::map<char,short> specifiers = { {'a',0}, {'A',0}, {'w',1}, {'b',0}, {'B',0}, 
{'Y',4},{'y',2}, {'m',2}, {'d',2}, {'H',2},{'I',2},{'M',2},{'S',2},{'f',6}, {'p',2},{'z',5},{'Z',3}, {'j',3},{'U',2},{'W',2} }; DTFormatCompiler( const char* format, size_t length, NVStrings::timestamp_units units ) : format(format), length(length), units(units), d_prog(nullptr), d_items(nullptr) {} ~DTFormatCompiler() { if( !d_prog ) RMM_FREE(d_prog,0); if( !d_items ) RMM_FREE(d_items,0); } DTProgram* compile_to_device() { //printf("dtc: format=[%s],%ld\n",format,length); const char* str = format; while( length > 0 ) { char ch = *str++; length--; if( ch!='%' ) { // this should be a Char items.push_back(DTFormatItem::new_delimiter(ch)); template_string.append(ch,1); continue; } if( length==0 ) throw std::invalid_argument("unfinished specifier"); ch = *str++; length--; if( ch=='%' ) { // escaped % items.push_back(DTFormatItem::new_delimiter(ch)); template_string.append(ch,1); continue; } if( specifiers.find(ch)==specifiers.end() ) { fprintf(stderr,"specifier %c unrecognized\n",ch); throw std::invalid_argument("invalid specifier"); } short flen = specifiers[ch]; if( ch=='f' ) { if( units==NVStrings::timestamp_units::ms ) flen = 3; else if( units==NVStrings::timestamp_units::ns ) flen = 9; } items.push_back(DTFormatItem::new_specifier(ch,flen)); template_string.append(ch,flen); } // create in device memory size_t buffer_size = items.size() * sizeof(DTFormatItem); d_items = reinterpret_cast<DTFormatItem*>(device_alloc<char>(buffer_size,0)); CUDA_TRY( hipMemcpyAsync(d_items, items.data(), buffer_size, hipMemcpyHostToDevice)) DTProgram hprog{items.size(),d_items}; d_prog = reinterpret_cast<DTProgram*>(device_alloc<char>(sizeof(DTProgram),0)); CUDA_TRY( hipMemcpyAsync(d_prog,&hprog,sizeof(DTProgram),hipMemcpyHostToDevice)) return d_prog; } // call valid only after compile size_t string_length() { return template_string.size(); } const char* string_template() { return template_string.c_str(); } size_t size() { return items.size(); } }; // this parses date/time characters 
into long timestamp struct parse_datetime { custring_view_array d_strings; unsigned long* d_timestamps; NVStrings::timestamp_units units; DTProgram* d_prog; parse_datetime( DTProgram* prog, custring_view_array strings, NVStrings::timestamp_units units, unsigned long* results ) : d_prog(prog), d_strings(strings), d_timestamps(results), units(units) {} // could use the custring::stoi but this should be faster since we know the data limits __device__ int str2int( const char* str, unsigned int bytes ) { const char* ptr = str; int value = 0; for( unsigned int idx=0; idx < bytes; ++idx ) { char chr = *ptr++; if( chr < '0' || chr > '9' ) break; value = (value * 10) + (int)(chr - '0'); } return value; } // only supports ascii __device__ int strcmp_ignore_case( const char* str1, const char* str2, size_t len ) { for( size_t idx=0; idx < len; ++idx ) { char ch1 = *str1; if( ch1 >= 'a' && ch1 <= 'z' ) ch1 = ch1 - 'a' + 'A'; char ch2 = *str2; if( ch2 >= 'a' && ch2 <= 'z' ) ch2 = ch2 - 'a' + 'A'; if( ch1==ch2 ) continue; return (int)(ch1 - ch2); } return 0; } // walk the prog to read the datetime string // return 0 if all ok __device__ int parse_into_parts( custring_view* d_string, int* timeparts ) { unsigned int count = d_prog->count; DTFormatItem* items = d_prog->items; const char* ptr = d_string->data(); unsigned int length = d_string->size(); for( unsigned int idx=0; idx < count; ++idx ) { DTFormatItem item = items[idx]; int slen = (int)item.length; //printf("%d:%c=%d\n",(int)fmt.ftype,ch,(int)slen); if(item.item_type==false) { // consume fmt.len bytes from datetime // could also check ch matches and throw exception if it does not ptr += slen; length -= slen; continue; } if( length < slen ) return 1; // special logic for each specifier switch(item.specifier) { case 'Y': timeparts[TP_YEAR] = str2int(ptr,slen); break; case 'y': timeparts[TP_YEAR] = str2int(ptr,slen)+1900; break; case 'm': timeparts[TP_MONTH] = str2int(ptr,slen); break; case 'd': case 'j': timeparts[TP_DAY] = 
str2int(ptr,slen); break; case 'H': case 'I': timeparts[TP_HOUR] = str2int(ptr,slen); break; case 'M': timeparts[TP_MINUTE] = str2int(ptr,slen); break; case 'S': timeparts[TP_SECOND] = str2int(ptr,slen); break; case 'f': timeparts[TP_SUBSECOND] = str2int(ptr,slen); break; case 'p': if( timeparts[TP_HOUR] <= 12 && strcmp_ignore_case(ptr,"PM",2)==0 ) // strncasecmp timeparts[TP_HOUR] += 12; break; case 'z': { int sign = *ptr=='-' ? -1:1; int hh = str2int(ptr+1,2); int mm = str2int(ptr+3,2); // ignoring the rest for now // slen has how many chars we should read timeparts[TP_TZ_MINUTES] = sign * ((hh*60)+mm); break; } case 'Z': if( strcmp_ignore_case(ptr,"UTC",3)!=0 ) return 2; break; // only recognize UTC default: return 3; } //printf(">>%d:%d\n",part,timeparts[part]); ptr += slen; length -= slen; } return 0; } __device__ long timestamp_from_parts( int* timeparts, NVStrings::timestamp_units units ) { int year = timeparts[TP_YEAR]; if( units==NVStrings::timestamp_units::years ) return year - 1970; int month = timeparts[TP_MONTH]; if( units==NVStrings::timestamp_units::months ) return ((year-1970) * 12) + (month-1); // months are 1-12, need to 0-base it here int day = timeparts[TP_DAY]; // The months are shifted so that March is the starting month and February // (possible leap day in it) is the last month for the linear calculation year -= (month <= 2) ? 1 : 0; // date cycle repeats every 400 years (era) const int erasInDays = 146097; const int erasInYears = (erasInDays / 365); const int era = (year >= 0 ? year : year - 399) / erasInYears; const int yoe = year - era * erasInYears; const int doy = month==0 ? day : ((153 * (month + (month > 2 ? 
-3 : 9)) + 2) / 5 + day - 1); const int doe = (yoe * 365) + (yoe / 4) - (yoe / 100) + doy; int days = (era * erasInDays) + doe - 719468; // 719468 = days from 0000-00-00 to 1970-03-01 if( units==NVStrings::timestamp_units::days ) return days; int tzadjust = timeparts[TP_TZ_MINUTES]; // in minutes int hour = timeparts[TP_HOUR]; if( units==NVStrings::timestamp_units::hours ) return (days*24L) + hour + (tzadjust/60); int minute = timeparts[TP_MINUTE]; if( units==NVStrings::timestamp_units::minutes ) return (long)(days * 24L * 60L) + (hour * 60L) + minute + tzadjust; int second = timeparts[TP_SECOND]; long timestamp = (days * 24L * 3600L) + (hour * 3600L) + (minute * 60L) + second + (tzadjust*60); if( units==NVStrings::timestamp_units::seconds ) return timestamp; int subsecond = timeparts[TP_SUBSECOND]; if( units==NVStrings::timestamp_units::ms ) timestamp *= 1000L; else if( units==NVStrings::timestamp_units::us ) timestamp *= 1000000L; else if( units==NVStrings::timestamp_units::ns ) timestamp *= 1000000000L; timestamp += subsecond; return timestamp; } __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; if( (dstr==0) || dstr->empty() ) { d_timestamps[idx] = 0; return; } // int timeparts[TP_ARRAYSIZE] = {0,1,1}; // month and day are 1-based if( parse_into_parts(dstr,timeparts) ) d_timestamps[idx] = 0; else d_timestamps[idx] = timestamp_from_parts(timeparts,units); } }; // convert date format into timestamp long integer int NVStrings::timestamp2long( const char* format, timestamp_units units, unsigned long* results, bool bdevmem ) { unsigned int count = size(); if( count==0 || results==0 ) return -1; auto execpol = rmm::exec_policy(0); unsigned long* d_rtn = results; if( !bdevmem ) d_rtn = device_alloc<unsigned long>(count,0); if( format==0 ) format = "%Y-%m-%dT%H:%M:%SZ"; size_t length = strlen(format); DTFormatCompiler dtc(format,length,units); DTProgram* prog = dtc.compile_to_device(); custring_view** d_strings = 
pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, parse_datetime(prog,d_strings,units,d_rtn)); // int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0); if( !bdevmem ) { CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(unsigned long)*count,hipMemcpyDeviceToHost)) RMM_FREE(d_rtn,0); } return (int)count-zeros; } // converts long timestamp into date-time string struct datetime_formatter { unsigned long* d_timestamps; custring_view_array d_strings; unsigned char* d_nulls; char* d_buffer; size_t* d_offsets; NVStrings::timestamp_units units; DTProgram* d_prog; datetime_formatter( DTProgram* prog, NVStrings::timestamp_units units, char* buffer, size_t* offsets, unsigned char* nulls, unsigned long* timestamps, custring_view_array strings) : d_timestamps(timestamps), d_buffer(buffer), d_offsets(offsets), d_nulls(nulls), d_strings(strings), units(units), d_prog(prog) {} __device__ void dissect_timestamp( long timestamp, int* timeparts ) { if( units==NVStrings::timestamp_units::years ) { timeparts[TP_YEAR] = (int)timestamp + 1970; timeparts[TP_MONTH] = 1; timeparts[TP_DAY] = 1; return; } if( units==NVStrings::timestamp_units::months ) { int month = timestamp % 12; int year = (timestamp / 12) + 1970; timeparts[TP_YEAR] = year; timeparts[TP_MONTH] = month +1; // months start at 1 and not 0 timeparts[TP_DAY] = 1; return; } // first, convert to days so we can handle months, leap years, etc. 
int days = (int)timestamp; if( units==NVStrings::timestamp_units::hours ) days = (int)(timestamp / 24L); else if( units==NVStrings::timestamp_units::minutes ) days = (int)(timestamp / 1440L); // 24*60 else if( units==NVStrings::timestamp_units::seconds ) days = (int)(timestamp / 86400L); // 24*60*60 else if( units==NVStrings::timestamp_units::ms ) days = (int)(timestamp / 86400000L); else if( units==NVStrings::timestamp_units::us ) days = (int)(timestamp / 86400000000L); else if( units==NVStrings::timestamp_units::ns ) days = (int)(timestamp / 86400000000000L); days = days + 719468; // 719468 is days between 0000-00-00 and 1970-01-01 const int daysInEra = 146097; // (400*365)+97 const int daysInCentury = 36524; // (100*365) + 24; const int daysIn4Years = 1461; // (4*365) + 1; const int daysInYear = 365; // day offsets for each month: Mar Apr May June July Aug Sep Oct Nov Dec Jan Feb const int monthDayOffset[] = { 0, 31, 61, 92, 122, 153, 184, 214, 245, 275, 306, 337, 366 }; // code logic handles leap years in chunks: 400y,100y,4y,1y int year = 400 * (days / daysInEra); days = days % daysInEra; int leapy = days / daysInCentury; days = days % daysInCentury; if( leapy==4 ) { // landed exactly on a leap century days += daysInCentury; --leapy; } year += 100 * leapy; year += 4 * (days / daysIn4Years); days = days % daysIn4Years; leapy = days / daysInYear; days = days % daysInYear; if( leapy==4 ) { // landed exactly on a leap year days += daysInYear; --leapy; } year += leapy; // int month = 12; for( int idx=0; idx < month; ++idx ) { // find the month if( days < monthDayOffset[idx+1] ) { month = idx; break; } } int day = days - monthDayOffset[month] +1; // compute day of month if( month >= 10 ) ++year; month = ((month + 2) % 12) +1; // adjust Jan-Mar offset timeparts[TP_YEAR] = year; timeparts[TP_MONTH] = month; timeparts[TP_DAY] = day; if( units==NVStrings::timestamp_units::days ) return; // done with date // now work on time long hour = timestamp, minute = timestamp, 
second = timestamp; if( units==NVStrings::timestamp_units::hours ) { timeparts[TP_HOUR] = (int)(hour % 24); return; } hour = hour / 60; if( units==NVStrings::timestamp_units::minutes ) { timeparts[TP_HOUR] = (int)(hour % 24); timeparts[TP_MINUTE] = (int)(minute % 60); return; } hour = hour / 60; minute = minute / 60; if( units==NVStrings::timestamp_units::seconds ) { timeparts[TP_HOUR] = (int)(hour % 24); timeparts[TP_MINUTE] = (int)(minute % 60); timeparts[TP_SECOND] = (int)(second % 60); return; } hour = hour / 1000; minute = minute / 1000; second = second / 1000; if( units==NVStrings::timestamp_units::ms ) { timeparts[TP_HOUR] = (int)(hour % 24); timeparts[TP_MINUTE] = (int)(minute % 60); timeparts[TP_SECOND] = (int)(second % 60); timeparts[TP_SUBSECOND] = (int)(timestamp % 1000); return; } hour = hour / 1000; minute = minute / 1000; second = second / 1000; if( units==NVStrings::timestamp_units::us ) { timeparts[TP_HOUR] = (int)(hour % 24); timeparts[TP_MINUTE] = (int)(minute % 60); timeparts[TP_SECOND] = (int)(second % 60); timeparts[TP_SUBSECOND] = (int)(timestamp % 1000000); return; } hour = hour / 1000; minute = minute / 1000; second = second / 1000; timeparts[TP_HOUR] = (int)(hour % 24); timeparts[TP_MINUTE] = (int)(minute % 60); timeparts[TP_SECOND] = (int)(second % 60); timeparts[TP_SUBSECOND] = (int)(timestamp % 1000000000); } // utility to create 0-padded integers (up to 4 bytes) __device__ char* int2str( char* str, int len, int val ) { char tmpl[9] = {'0','0','0','0','0','0','0','0','0'}; char* ptr = tmpl; while( val > 0 ) { int digit = val % 10; *ptr++ = '0' + digit; val = val / 10; } ptr = tmpl + len-1; while( len > 0 ) { *str++ = *ptr--; --len; } return str; } __device__ char* format_from_parts( int* timeparts, char* ptr ) { size_t count = d_prog->count; DTFormatItem* d_items = d_prog->items; for( size_t idx=0; idx < count; ++idx ) { DTFormatItem item = d_items[idx]; int slen = (int)item.length; //printf("%d:%c=%d\n",(int)fmt.ftype,ch,(int)slen); 
if(item.item_type==false) { *ptr++ = item.literal; continue; } // special logic for each specifier switch(item.specifier) { case 'Y': ptr = int2str(ptr,slen,timeparts[TP_YEAR]); break; case 'y': ptr = int2str(ptr,slen,timeparts[TP_YEAR]-1900); break; case 'm': ptr = int2str(ptr,slen,timeparts[TP_MONTH]); break; case 'd': case 'j': ptr = int2str(ptr,slen,timeparts[TP_DAY]); break; case 'H': ptr = int2str(ptr,slen,timeparts[TP_HOUR]); break; case 'I': ptr = int2str(ptr,slen,timeparts[TP_HOUR] % 12); break; case 'M': ptr = int2str(ptr,slen,timeparts[TP_MINUTE]); break; case 'S': ptr = int2str(ptr,slen,timeparts[TP_SECOND]); break; case 'f': ptr = int2str(ptr,slen,timeparts[TP_SUBSECOND]); break; case 'p': if( timeparts[TP_HOUR] <= 12 ) memcpy(ptr,"AM",2); else memcpy(ptr,"PM",2); ptr += 2; break; case 'z': break; // do nothing for this one case 'Z': memcpy(ptr,"UTC",3); ptr += 3; break; default: break; } } return ptr; } __device__ void operator()( unsigned int idx ) { if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) { d_strings[idx] = nullptr; return; } long timestamp = d_timestamps[idx]; int timeparts[TP_ARRAYSIZE] = {0}; dissect_timestamp(timestamp,timeparts); // convert to characters char* str = d_buffer + d_offsets[idx]; char* ptr = format_from_parts(timeparts,str); int len = (int)(ptr - str); d_strings[idx] = custring_view::create_from(str,str,len); } }; NVStrings* NVStrings::long2timestamp( const unsigned long* values, unsigned int count, timestamp_units units, const char* format, const unsigned char* nullbitmask, bool bdevmem ) { if( values==0 || count==0 ) throw std::invalid_argument("nvstrings::long2timestamp values or count invalid"); auto execpol = rmm::exec_policy(0); NVStrings* rtn = new NVStrings(count); unsigned long* d_values = (unsigned long*)values; unsigned char* d_nulls = (unsigned char*)nullbitmask; if( !bdevmem ) { d_values = device_alloc<unsigned long>(count,0); CUDA_TRY( hipMemcpyAsync(d_values,values,count*sizeof(unsigned 
long),hipMemcpyHostToDevice)) if( nullbitmask ) { d_nulls = device_alloc<unsigned char>(((count+7)/8),0); CUDA_TRY( hipMemcpyAsync(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),hipMemcpyHostToDevice)) } } if( format==0 ) format = "%Y-%m-%dT%H:%M:%SZ"; size_t length = strlen(format); DTFormatCompiler dtc(format,length,units); DTProgram* prog = dtc.compile_to_device(); // compute size of memory we'll need // each string will be the same size with the length int d_size = custring_view::alloc_size(dtc.string_template(),dtc.string_length()); // we only need to account for any null strings rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_values, d_size, d_nulls, d_sizes] __device__ (unsigned int idx) { if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) d_sizes[idx] = 0; else d_sizes[idx] = ALIGN_SIZE(d_size); }); rmm::device_vector<size_t> offsets(count,0); size_t* d_offsets = offsets.data().get(); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // build iso8601 strings from timestamps char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); custring_view_array d_strings = rtn->pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, datetime_formatter(prog, units,d_buffer, d_offsets, d_nulls, d_values, d_strings)); // if( !bdevmem ) RMM_FREE(d_values,0); return rtn; }
e82e2a3a520118675daacace49c363353bda0105.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <exception> #include <map> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <thrust/count.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <utilities/error_utils.hpp> #include "nvstrings/NVStrings.h" #include "./NVStringsImpl.h" #include "../custring_view.cuh" #include "../util.h" // used to index values in a timeparts array #define TP_YEAR 0 #define TP_MONTH 1 #define TP_DAY 2 #define TP_HOUR 3 #define TP_MINUTE 4 #define TP_SECOND 5 #define TP_SUBSECOND 6 #define TP_TZ_MINUTES 7 #define TP_ARRAYSIZE 8 struct DTFormatItem { bool item_type; // 1=specifier, 0=literal char specifier; // specifier short length; // item length in bytes char literal; // pass-thru character // this should be a Char static DTFormatItem new_specifier(char fmt, short len) { DTFormatItem item{true,fmt,len,0}; return item; } static DTFormatItem new_delimiter(char ch) { DTFormatItem item{false,0,1,ch}; return item; } }; struct DTProgram { size_t count; DTFormatItem* items; }; struct DTFormatCompiler { std::vector<DTFormatItem> items; const char* format; size_t length; std::string template_string; NVStrings::timestamp_units units; DTProgram* d_prog; DTFormatItem* d_items; std::map<char,short> specifiers = { {'a',0}, {'A',0}, {'w',1}, {'b',0}, {'B',0}, {'Y',4},{'y',2}, {'m',2}, {'d',2}, 
{'H',2},{'I',2},{'M',2},{'S',2},{'f',6}, {'p',2},{'z',5},{'Z',3}, {'j',3},{'U',2},{'W',2} }; DTFormatCompiler( const char* format, size_t length, NVStrings::timestamp_units units ) : format(format), length(length), units(units), d_prog(nullptr), d_items(nullptr) {} ~DTFormatCompiler() { if( !d_prog ) RMM_FREE(d_prog,0); if( !d_items ) RMM_FREE(d_items,0); } DTProgram* compile_to_device() { //printf("dtc: format=[%s],%ld\n",format,length); const char* str = format; while( length > 0 ) { char ch = *str++; length--; if( ch!='%' ) { // this should be a Char items.push_back(DTFormatItem::new_delimiter(ch)); template_string.append(ch,1); continue; } if( length==0 ) throw std::invalid_argument("unfinished specifier"); ch = *str++; length--; if( ch=='%' ) { // escaped % items.push_back(DTFormatItem::new_delimiter(ch)); template_string.append(ch,1); continue; } if( specifiers.find(ch)==specifiers.end() ) { fprintf(stderr,"specifier %c unrecognized\n",ch); throw std::invalid_argument("invalid specifier"); } short flen = specifiers[ch]; if( ch=='f' ) { if( units==NVStrings::timestamp_units::ms ) flen = 3; else if( units==NVStrings::timestamp_units::ns ) flen = 9; } items.push_back(DTFormatItem::new_specifier(ch,flen)); template_string.append(ch,flen); } // create in device memory size_t buffer_size = items.size() * sizeof(DTFormatItem); d_items = reinterpret_cast<DTFormatItem*>(device_alloc<char>(buffer_size,0)); CUDA_TRY( cudaMemcpyAsync(d_items, items.data(), buffer_size, cudaMemcpyHostToDevice)) DTProgram hprog{items.size(),d_items}; d_prog = reinterpret_cast<DTProgram*>(device_alloc<char>(sizeof(DTProgram),0)); CUDA_TRY( cudaMemcpyAsync(d_prog,&hprog,sizeof(DTProgram),cudaMemcpyHostToDevice)) return d_prog; } // call valid only after compile size_t string_length() { return template_string.size(); } const char* string_template() { return template_string.c_str(); } size_t size() { return items.size(); } }; // this parses date/time characters into long timestamp struct 
parse_datetime { custring_view_array d_strings; unsigned long* d_timestamps; NVStrings::timestamp_units units; DTProgram* d_prog; parse_datetime( DTProgram* prog, custring_view_array strings, NVStrings::timestamp_units units, unsigned long* results ) : d_prog(prog), d_strings(strings), d_timestamps(results), units(units) {} // could use the custring::stoi but this should be faster since we know the data limits __device__ int str2int( const char* str, unsigned int bytes ) { const char* ptr = str; int value = 0; for( unsigned int idx=0; idx < bytes; ++idx ) { char chr = *ptr++; if( chr < '0' || chr > '9' ) break; value = (value * 10) + (int)(chr - '0'); } return value; } // only supports ascii __device__ int strcmp_ignore_case( const char* str1, const char* str2, size_t len ) { for( size_t idx=0; idx < len; ++idx ) { char ch1 = *str1; if( ch1 >= 'a' && ch1 <= 'z' ) ch1 = ch1 - 'a' + 'A'; char ch2 = *str2; if( ch2 >= 'a' && ch2 <= 'z' ) ch2 = ch2 - 'a' + 'A'; if( ch1==ch2 ) continue; return (int)(ch1 - ch2); } return 0; } // walk the prog to read the datetime string // return 0 if all ok __device__ int parse_into_parts( custring_view* d_string, int* timeparts ) { unsigned int count = d_prog->count; DTFormatItem* items = d_prog->items; const char* ptr = d_string->data(); unsigned int length = d_string->size(); for( unsigned int idx=0; idx < count; ++idx ) { DTFormatItem item = items[idx]; int slen = (int)item.length; //printf("%d:%c=%d\n",(int)fmt.ftype,ch,(int)slen); if(item.item_type==false) { // consume fmt.len bytes from datetime // could also check ch matches and throw exception if it does not ptr += slen; length -= slen; continue; } if( length < slen ) return 1; // special logic for each specifier switch(item.specifier) { case 'Y': timeparts[TP_YEAR] = str2int(ptr,slen); break; case 'y': timeparts[TP_YEAR] = str2int(ptr,slen)+1900; break; case 'm': timeparts[TP_MONTH] = str2int(ptr,slen); break; case 'd': case 'j': timeparts[TP_DAY] = str2int(ptr,slen); break; 
case 'H': case 'I': timeparts[TP_HOUR] = str2int(ptr,slen); break; case 'M': timeparts[TP_MINUTE] = str2int(ptr,slen); break; case 'S': timeparts[TP_SECOND] = str2int(ptr,slen); break; case 'f': timeparts[TP_SUBSECOND] = str2int(ptr,slen); break; case 'p': if( timeparts[TP_HOUR] <= 12 && strcmp_ignore_case(ptr,"PM",2)==0 ) // strncasecmp timeparts[TP_HOUR] += 12; break; case 'z': { int sign = *ptr=='-' ? -1:1; int hh = str2int(ptr+1,2); int mm = str2int(ptr+3,2); // ignoring the rest for now // slen has how many chars we should read timeparts[TP_TZ_MINUTES] = sign * ((hh*60)+mm); break; } case 'Z': if( strcmp_ignore_case(ptr,"UTC",3)!=0 ) return 2; break; // only recognize UTC default: return 3; } //printf(">>%d:%d\n",part,timeparts[part]); ptr += slen; length -= slen; } return 0; } __device__ long timestamp_from_parts( int* timeparts, NVStrings::timestamp_units units ) { int year = timeparts[TP_YEAR]; if( units==NVStrings::timestamp_units::years ) return year - 1970; int month = timeparts[TP_MONTH]; if( units==NVStrings::timestamp_units::months ) return ((year-1970) * 12) + (month-1); // months are 1-12, need to 0-base it here int day = timeparts[TP_DAY]; // The months are shifted so that March is the starting month and February // (possible leap day in it) is the last month for the linear calculation year -= (month <= 2) ? 1 : 0; // date cycle repeats every 400 years (era) const int erasInDays = 146097; const int erasInYears = (erasInDays / 365); const int era = (year >= 0 ? year : year - 399) / erasInYears; const int yoe = year - era * erasInYears; const int doy = month==0 ? day : ((153 * (month + (month > 2 ? 
-3 : 9)) + 2) / 5 + day - 1); const int doe = (yoe * 365) + (yoe / 4) - (yoe / 100) + doy; int days = (era * erasInDays) + doe - 719468; // 719468 = days from 0000-00-00 to 1970-03-01 if( units==NVStrings::timestamp_units::days ) return days; int tzadjust = timeparts[TP_TZ_MINUTES]; // in minutes int hour = timeparts[TP_HOUR]; if( units==NVStrings::timestamp_units::hours ) return (days*24L) + hour + (tzadjust/60); int minute = timeparts[TP_MINUTE]; if( units==NVStrings::timestamp_units::minutes ) return (long)(days * 24L * 60L) + (hour * 60L) + minute + tzadjust; int second = timeparts[TP_SECOND]; long timestamp = (days * 24L * 3600L) + (hour * 3600L) + (minute * 60L) + second + (tzadjust*60); if( units==NVStrings::timestamp_units::seconds ) return timestamp; int subsecond = timeparts[TP_SUBSECOND]; if( units==NVStrings::timestamp_units::ms ) timestamp *= 1000L; else if( units==NVStrings::timestamp_units::us ) timestamp *= 1000000L; else if( units==NVStrings::timestamp_units::ns ) timestamp *= 1000000000L; timestamp += subsecond; return timestamp; } __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; if( (dstr==0) || dstr->empty() ) { d_timestamps[idx] = 0; return; } // int timeparts[TP_ARRAYSIZE] = {0,1,1}; // month and day are 1-based if( parse_into_parts(dstr,timeparts) ) d_timestamps[idx] = 0; else d_timestamps[idx] = timestamp_from_parts(timeparts,units); } }; // convert date format into timestamp long integer int NVStrings::timestamp2long( const char* format, timestamp_units units, unsigned long* results, bool bdevmem ) { unsigned int count = size(); if( count==0 || results==0 ) return -1; auto execpol = rmm::exec_policy(0); unsigned long* d_rtn = results; if( !bdevmem ) d_rtn = device_alloc<unsigned long>(count,0); if( format==0 ) format = "%Y-%m-%dT%H:%M:%SZ"; size_t length = strlen(format); DTFormatCompiler dtc(format,length,units); DTProgram* prog = dtc.compile_to_device(); custring_view** d_strings = 
pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, parse_datetime(prog,d_strings,units,d_rtn)); // int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0); if( !bdevmem ) { CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(unsigned long)*count,cudaMemcpyDeviceToHost)) RMM_FREE(d_rtn,0); } return (int)count-zeros; } // converts long timestamp into date-time string struct datetime_formatter { unsigned long* d_timestamps; custring_view_array d_strings; unsigned char* d_nulls; char* d_buffer; size_t* d_offsets; NVStrings::timestamp_units units; DTProgram* d_prog; datetime_formatter( DTProgram* prog, NVStrings::timestamp_units units, char* buffer, size_t* offsets, unsigned char* nulls, unsigned long* timestamps, custring_view_array strings) : d_timestamps(timestamps), d_buffer(buffer), d_offsets(offsets), d_nulls(nulls), d_strings(strings), units(units), d_prog(prog) {} __device__ void dissect_timestamp( long timestamp, int* timeparts ) { if( units==NVStrings::timestamp_units::years ) { timeparts[TP_YEAR] = (int)timestamp + 1970; timeparts[TP_MONTH] = 1; timeparts[TP_DAY] = 1; return; } if( units==NVStrings::timestamp_units::months ) { int month = timestamp % 12; int year = (timestamp / 12) + 1970; timeparts[TP_YEAR] = year; timeparts[TP_MONTH] = month +1; // months start at 1 and not 0 timeparts[TP_DAY] = 1; return; } // first, convert to days so we can handle months, leap years, etc. 
int days = (int)timestamp; if( units==NVStrings::timestamp_units::hours ) days = (int)(timestamp / 24L); else if( units==NVStrings::timestamp_units::minutes ) days = (int)(timestamp / 1440L); // 24*60 else if( units==NVStrings::timestamp_units::seconds ) days = (int)(timestamp / 86400L); // 24*60*60 else if( units==NVStrings::timestamp_units::ms ) days = (int)(timestamp / 86400000L); else if( units==NVStrings::timestamp_units::us ) days = (int)(timestamp / 86400000000L); else if( units==NVStrings::timestamp_units::ns ) days = (int)(timestamp / 86400000000000L); days = days + 719468; // 719468 is days between 0000-00-00 and 1970-01-01 const int daysInEra = 146097; // (400*365)+97 const int daysInCentury = 36524; // (100*365) + 24; const int daysIn4Years = 1461; // (4*365) + 1; const int daysInYear = 365; // day offsets for each month: Mar Apr May June July Aug Sep Oct Nov Dec Jan Feb const int monthDayOffset[] = { 0, 31, 61, 92, 122, 153, 184, 214, 245, 275, 306, 337, 366 }; // code logic handles leap years in chunks: 400y,100y,4y,1y int year = 400 * (days / daysInEra); days = days % daysInEra; int leapy = days / daysInCentury; days = days % daysInCentury; if( leapy==4 ) { // landed exactly on a leap century days += daysInCentury; --leapy; } year += 100 * leapy; year += 4 * (days / daysIn4Years); days = days % daysIn4Years; leapy = days / daysInYear; days = days % daysInYear; if( leapy==4 ) { // landed exactly on a leap year days += daysInYear; --leapy; } year += leapy; // int month = 12; for( int idx=0; idx < month; ++idx ) { // find the month if( days < monthDayOffset[idx+1] ) { month = idx; break; } } int day = days - monthDayOffset[month] +1; // compute day of month if( month >= 10 ) ++year; month = ((month + 2) % 12) +1; // adjust Jan-Mar offset timeparts[TP_YEAR] = year; timeparts[TP_MONTH] = month; timeparts[TP_DAY] = day; if( units==NVStrings::timestamp_units::days ) return; // done with date // now work on time long hour = timestamp, minute = timestamp, 
second = timestamp; if( units==NVStrings::timestamp_units::hours ) { timeparts[TP_HOUR] = (int)(hour % 24); return; } hour = hour / 60; if( units==NVStrings::timestamp_units::minutes ) { timeparts[TP_HOUR] = (int)(hour % 24); timeparts[TP_MINUTE] = (int)(minute % 60); return; } hour = hour / 60; minute = minute / 60; if( units==NVStrings::timestamp_units::seconds ) { timeparts[TP_HOUR] = (int)(hour % 24); timeparts[TP_MINUTE] = (int)(minute % 60); timeparts[TP_SECOND] = (int)(second % 60); return; } hour = hour / 1000; minute = minute / 1000; second = second / 1000; if( units==NVStrings::timestamp_units::ms ) { timeparts[TP_HOUR] = (int)(hour % 24); timeparts[TP_MINUTE] = (int)(minute % 60); timeparts[TP_SECOND] = (int)(second % 60); timeparts[TP_SUBSECOND] = (int)(timestamp % 1000); return; } hour = hour / 1000; minute = minute / 1000; second = second / 1000; if( units==NVStrings::timestamp_units::us ) { timeparts[TP_HOUR] = (int)(hour % 24); timeparts[TP_MINUTE] = (int)(minute % 60); timeparts[TP_SECOND] = (int)(second % 60); timeparts[TP_SUBSECOND] = (int)(timestamp % 1000000); return; } hour = hour / 1000; minute = minute / 1000; second = second / 1000; timeparts[TP_HOUR] = (int)(hour % 24); timeparts[TP_MINUTE] = (int)(minute % 60); timeparts[TP_SECOND] = (int)(second % 60); timeparts[TP_SUBSECOND] = (int)(timestamp % 1000000000); } // utility to create 0-padded integers (up to 4 bytes) __device__ char* int2str( char* str, int len, int val ) { char tmpl[9] = {'0','0','0','0','0','0','0','0','0'}; char* ptr = tmpl; while( val > 0 ) { int digit = val % 10; *ptr++ = '0' + digit; val = val / 10; } ptr = tmpl + len-1; while( len > 0 ) { *str++ = *ptr--; --len; } return str; } __device__ char* format_from_parts( int* timeparts, char* ptr ) { size_t count = d_prog->count; DTFormatItem* d_items = d_prog->items; for( size_t idx=0; idx < count; ++idx ) { DTFormatItem item = d_items[idx]; int slen = (int)item.length; //printf("%d:%c=%d\n",(int)fmt.ftype,ch,(int)slen); 
if(item.item_type==false) { *ptr++ = item.literal; continue; } // special logic for each specifier switch(item.specifier) { case 'Y': ptr = int2str(ptr,slen,timeparts[TP_YEAR]); break; case 'y': ptr = int2str(ptr,slen,timeparts[TP_YEAR]-1900); break; case 'm': ptr = int2str(ptr,slen,timeparts[TP_MONTH]); break; case 'd': case 'j': ptr = int2str(ptr,slen,timeparts[TP_DAY]); break; case 'H': ptr = int2str(ptr,slen,timeparts[TP_HOUR]); break; case 'I': ptr = int2str(ptr,slen,timeparts[TP_HOUR] % 12); break; case 'M': ptr = int2str(ptr,slen,timeparts[TP_MINUTE]); break; case 'S': ptr = int2str(ptr,slen,timeparts[TP_SECOND]); break; case 'f': ptr = int2str(ptr,slen,timeparts[TP_SUBSECOND]); break; case 'p': if( timeparts[TP_HOUR] <= 12 ) memcpy(ptr,"AM",2); else memcpy(ptr,"PM",2); ptr += 2; break; case 'z': break; // do nothing for this one case 'Z': memcpy(ptr,"UTC",3); ptr += 3; break; default: break; } } return ptr; } __device__ void operator()( unsigned int idx ) { if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) { d_strings[idx] = nullptr; return; } long timestamp = d_timestamps[idx]; int timeparts[TP_ARRAYSIZE] = {0}; dissect_timestamp(timestamp,timeparts); // convert to characters char* str = d_buffer + d_offsets[idx]; char* ptr = format_from_parts(timeparts,str); int len = (int)(ptr - str); d_strings[idx] = custring_view::create_from(str,str,len); } }; NVStrings* NVStrings::long2timestamp( const unsigned long* values, unsigned int count, timestamp_units units, const char* format, const unsigned char* nullbitmask, bool bdevmem ) { if( values==0 || count==0 ) throw std::invalid_argument("nvstrings::long2timestamp values or count invalid"); auto execpol = rmm::exec_policy(0); NVStrings* rtn = new NVStrings(count); unsigned long* d_values = (unsigned long*)values; unsigned char* d_nulls = (unsigned char*)nullbitmask; if( !bdevmem ) { d_values = device_alloc<unsigned long>(count,0); CUDA_TRY( cudaMemcpyAsync(d_values,values,count*sizeof(unsigned 
long),cudaMemcpyHostToDevice)) if( nullbitmask ) { d_nulls = device_alloc<unsigned char>(((count+7)/8),0); CUDA_TRY( cudaMemcpyAsync(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),cudaMemcpyHostToDevice)) } } if( format==0 ) format = "%Y-%m-%dT%H:%M:%SZ"; size_t length = strlen(format); DTFormatCompiler dtc(format,length,units); DTProgram* prog = dtc.compile_to_device(); // compute size of memory we'll need // each string will be the same size with the length int d_size = custring_view::alloc_size(dtc.string_template(),dtc.string_length()); // we only need to account for any null strings rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_values, d_size, d_nulls, d_sizes] __device__ (unsigned int idx) { if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) d_sizes[idx] = 0; else d_sizes[idx] = ALIGN_SIZE(d_size); }); rmm::device_vector<size_t> offsets(count,0); size_t* d_offsets = offsets.data().get(); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // build iso8601 strings from timestamps char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); custring_view_array d_strings = rtn->pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, datetime_formatter(prog, units,d_buffer, d_offsets, d_nulls, d_values, d_strings)); // if( !bdevmem ) RMM_FREE(d_values,0); return rtn; }
b692f35c941abd8ad9f3a66030e442c069c4803f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> // CUDA includes #include <cutil.h> #include <cutil_math.h> // own stuff #include "cuda_wrappers.h" // kernel implementations #include "k_grayscale.cu" #include "k_weightmap.cu" #include "k_gaussian.cu" #include "k_laplace.cu" #include "k_combine.cu" #include "k_collapse.cu" #include "k_copy.cu" #include "k_linear.hip" #include "k_simpleEnfuse.cu" #include "cuda_wrappers.h" #include "filter_wrappers.h" /* * GLOBAL VARIABLES */ // configure the grid layout const int blocksize_x = 8; const int blocksize_y = 8; // cuda channel descriptor hipChannelFormatDesc uchar4tex = hipCreateChannelDesc<uchar4>(); /* * Upload the given RGB structure given by a uchar4 pointer into a hipArray in * the GPU's memory and return a pointer to the array on the GPU's memory. */ hipArray* uploadToArray(const uchar4 *source, int width, int height) { return cuda_uploadToArray(&uchar4tex, width, height, source); } float4 *genSimpleEnfuse(float4 *target, hipArray* source, float *weight, int width, int height) { int memSize_weight = width * height * sizeof(float); // make sources available as texture cuda_bindTexture(&uchar4tex, source, "en_tex_image"); CUDA_SAFE_CALL(hipBindTexture(0, en_tex_image_weight, weight, memSize_weight)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( simpleEnfuse) , dim3(grid), dim3(threads) , 0, 0, target, width, height); printCudaError(stderr, hipGetLastError(), "simpleEnfuse"); // cleanup CUDA_SAFE_CALL(hipUnbindTexture(en_tex_image)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); CUDA_SAFE_CALL(hipUnbindTexture(en_tex_image_weight)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } float4 *genAppliedWeightmap(hipArray* 
source, float *weight, int width, int height) { float4 *target; int memSize_target = width * height * sizeof(float4); int memSize_weight = width * height * sizeof(float); // allocate graphics memory for target CUDA_SAFE_CALL(hipMalloc((void**) &target, memSize_target)); printCudaError(stderr, hipGetLastError(), "hipMalloc"); // make sources available as texture cuda_bindTexture(&uchar4tex, source, "en_tex_image"); CUDA_SAFE_CALL(hipBindTexture(0, en_tex_image_weight, weight, memSize_weight)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( applyWeightmap) , dim3(grid), dim3(threads) , 0, 0, target, width, height); printCudaError(stderr, hipGetLastError(), "applyWeightmap"); // cleanup CUDA_SAFE_CALL(hipUnbindTexture(en_tex_image)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); CUDA_SAFE_CALL(hipUnbindTexture(en_tex_image_weight)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } float* genGrayscale(const hipArray *source, int width, int height) { float *target = cuda_malloc<float>(width, height); cuda_bindTexture(&uchar4tex, source, "g_tex_image"); // kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); hipLaunchKernelGGL(( grayscale) , dim3(grid), dim3(threads) , 0, 0, target, width, height); printCudaError(stderr, hipGetLastError(), "grayscale"); cuda_unbindTexture("g_tex_image"); return target; } float *genWeightmap(const hipArray *image_source, const float *gray_source, int width, int height) { float *target = cuda_malloc<float>(width, height); cuda_bindTexture(&uchar4tex, image_source, "w_tex_image"); cuda_bindTexture<float>(gray_source, width, height, "w_tex_gray"); // kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 
1); dim3 threads(blocksize_x, blocksize_y); hipLaunchKernelGGL(( weightmap) , dim3(grid), dim3(threads) , 0, 0, target, width, height); printCudaError(stderr, hipGetLastError(), "weightmap"); cuda_unbindTexture("w_tex_image"); cuda_unbindTexture("w_tex_gray"); return target; } float4 *genGaussDown(float4 *source, int *width, int *height) { float4 *target; int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO int target_memSize = target_width * target_height * sizeof(float4); int source_memSize = (*width) * (*height) * sizeof(float4); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(hipMalloc((void**) &target, target_memSize)); printCudaError(stderr, hipGetLastError(), "hipMalloc"); // make sources available as texture CUDA_SAFE_CALL(hipBindTexture(0, ga_tex_image, source, source_memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(target_width/blocksize_x) + 1,floor(target_height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( gaussDown) , dim3(grid), dim3(threads) , 0, 0, target, target_width, target_height); printCudaError(stderr, hipGetLastError(), "gauss"); // update new image dimensions *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(hipUnbindTexture(ga_tex_image)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } float4 *toFloat4(hipArray *source, int width, int height) { float4 *target; // allocate graphics memory for target int memSize_target = width * height * sizeof(float4); CUDA_SAFE_CALL(hipMalloc((void**) &target, memSize_target)); printCudaError(stderr, hipGetLastError(), "hipMalloc"); // make sources available as texture CUDA_SAFE_CALL(hipBindTextureToArray(tf_tex_source, source)); printCudaError(stderr, hipGetLastError(), "hipBindTextureToArray"); // prepare kernel execution dim3 
grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( copyUcharToFloat) , dim3(grid), dim3(threads) , 0, 0, target, width, height); printCudaError(stderr, hipGetLastError(), "copyUcharToFloat"); // cleanup CUDA_SAFE_CALL(hipUnbindTexture(tf_tex_source)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } float *genGaussDown_f(float *source, int *width, int *height) { float *target; int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO int target_memSize = target_width * target_height * sizeof(float); int source_memSize = (*width) * (*height) * sizeof(float); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(hipMalloc((void**) &target, target_memSize)); printCudaError(stderr, hipGetLastError(), "hipMalloc"); // make sources available as texture CUDA_SAFE_CALL(hipBindTexture(0, ga_tex_image_f, source, source_memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(target_width/blocksize_x) + 1,floor(target_height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( gaussDown_f) , dim3(grid), dim3(threads) , 0, 0, target, target_width, target_height); printCudaError(stderr, hipGetLastError(), "gaussDown_f"); // update new image dimensions *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(hipUnbindTexture(ga_tex_image)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } float *genLinearDown_f(float *source, int *width, int *height) { float *target; int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO int target_memSize = target_width * target_height * sizeof(float); int source_memSize = (*width) * (*height) * sizeof(float); // printf("w: %i, h: %i\n", *width, 
*height); // allocate graphics memory for target CUDA_SAFE_CALL(hipMalloc((void**) &target, target_memSize)); printCudaError(stderr, hipGetLastError(), "hipMalloc"); // make sources available as texture CUDA_SAFE_CALL(hipBindTexture(0, dummy, source, source_memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(target_width/blocksize_x) + 1,floor(target_height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( linearDown_f) , dim3(grid), dim3(threads) , 0, 0, target, target_width, target_height); printCudaError(stderr, hipGetLastError(), "linearDown_f"); // update new image dimensions *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(hipUnbindTexture(dummy)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } float4 *genGaussUp(float4 *source, int *width, int *height) { float4 *target; int target_width = floor(*width * 2); // TODO int target_height = floor(*height * 2); // TODO int target_memSize = target_width * target_height * sizeof(float4); int source_memSize = (*width) * (*height) * sizeof(float4); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(hipMalloc((void**) &target, target_memSize)); printCudaError(stderr, hipGetLastError(), "hipMalloc"); // make sources available as texture CUDA_SAFE_CALL(hipBindTexture(0, ga_tex_image, source, source_memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( gaussUp) , dim3(grid), dim3(threads) , 0, 0, target, *width, *height); printCudaError(stderr, hipGetLastError(), "gauss"); // update new image dimensions *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(hipUnbindTexture(ga_tex_image)); 
printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } float4 *genLaplace(float4 *source_down, float4 *source_up, int *width, int *height) { float4 *target; int memSize = (*width) * (*height) * sizeof(float4); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(hipMalloc((void**) &target, memSize)); printCudaError(stderr, hipGetLastError(), "hipMalloc"); // make sources available as texture CUDA_SAFE_CALL(hipBindTexture(0, la_tex_image_down, source_down, memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); CUDA_SAFE_CALL(hipBindTexture(0, la_tex_image_up, source_up, memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( laplace) , dim3(grid), dim3(threads) , 0, 0, target, *width, *height); printCudaError(stderr, hipGetLastError(), "laplace"); // FIXME // update new image dimensions int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(hipUnbindTexture(la_tex_image_up)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); CUDA_SAFE_CALL(hipUnbindTexture(la_tex_image_down)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } float4 *genWeightedLaplace(float4 *source_laplace, float *source_weight, int *width, int *height) { float4 *target; int memSize_laplace = (*width) * (*height) * sizeof(float4); int memSize_weight = (*width) * (*height) * sizeof(float); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(hipMalloc((void**) &target, memSize_laplace)); printCudaError(stderr, hipGetLastError(), "hipMalloc"); // make sources available as texture CUDA_SAFE_CALL(hipBindTexture(0, 
la_tex_image_laplace, source_laplace, memSize_laplace)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); CUDA_SAFE_CALL(hipBindTexture(0, la_tex_image_weight, source_weight, memSize_weight)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( weightedLaplace) , dim3(grid), dim3(threads) , 0, 0, target, *width, *height); printCudaError(stderr, hipGetLastError(), "weightedLaplace"); // FIXME // update new image dimensions int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(hipUnbindTexture(la_tex_image_up)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); CUDA_SAFE_CALL(hipUnbindTexture(la_tex_image_down)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } void normWeightmaps(float *map_1, float *map_2, int width, int height) { // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( normalizeWeightmaps) , dim3(grid), dim3(threads) , 0, 0, map_1, map_2, width, height); printCudaError(stderr, hipGetLastError(), "normalizeWeightmaps"); } void normWeightmaps(float **maps, int numMaps, int width, int height) { // sum up all given weightmaps to the target float *target = cuda_memcpy<float>(maps[0], width, height); for (int i = 1; i < numMaps; i++) { // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( addToTarget) , dim3(grid), dim3(threads) , 0, 0, target, maps[i], width, height); printCudaError(stderr, hipGetLastError(), "addToTarget"); } // normalize all given weightmaps 
with the calculated divisors cuda_bindTexture<float>(target, width, height, "w_tex_divisor"); for (int i = 0; i < numMaps; i++) { // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( normalizeWeightmap) , dim3(grid), dim3(threads) , 0, 0, maps[i], width, height); printCudaError(stderr, hipGetLastError(), "normalizeWeightmap"); } cuda_unbindTexture("w_tex_divisor"); hipFree(target); } float4 *genAdded(float4* source_1, float4 *source_2, int *width, int *height) { float4 *target; int memSize = (*width) * (*height) * sizeof(float4); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(hipMalloc((void**) &target, memSize)); printCudaError(stderr, hipGetLastError(), "hipMalloc"); // make sources available as texture CUDA_SAFE_CALL(hipBindTexture(0, a_tex_image_1, source_1, memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); CUDA_SAFE_CALL(hipBindTexture(0, a_tex_image_2, source_2, memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( add) , dim3(grid), dim3(threads) , 0, 0, target, *width, *height); printCudaError(stderr, hipGetLastError(), "laplace"); // FIXME // update new image dimensions int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(hipUnbindTexture(a_tex_image_1)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); CUDA_SAFE_CALL(hipUnbindTexture(a_tex_image_2)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } float *genAdded(float* source_1, float *source_2, int *width, int *height) { float *target; int memSize = 
(*width) * (*height) * sizeof(float); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(hipMalloc((void**) &target, memSize)); printCudaError(stderr, hipGetLastError(), "hipMalloc"); // make sources available as texture CUDA_SAFE_CALL(hipBindTexture(0, af_tex_image_1, source_1, memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); CUDA_SAFE_CALL(hipBindTexture(0, af_tex_image_2, source_2, memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( add_f) , dim3(grid), dim3(threads) , 0, 0, target, *width, *height); printCudaError(stderr, hipGetLastError(), "laplace"); // cleanup CUDA_SAFE_CALL(hipUnbindTexture(af_tex_image_1)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); CUDA_SAFE_CALL(hipUnbindTexture(af_tex_image_2)); printCudaError(stderr, hipGetLastError(), "hipUnbindTexture"); return target; } float4 *genCollapsed(float4 *src_big, float4 *src_small, int *width, int *height) { int memSize = (*width) * (*height) * sizeof(float4); // make sources available as texture CUDA_SAFE_CALL(hipBindTexture(0, c_tex_image, src_small, memSize)); printCudaError(stderr, hipGetLastError(), "hipBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel hipLaunchKernelGGL(( collapse) , dim3(grid), dim3(threads) , 0, 0, src_big, *width, *height); printCudaError(stderr, hipGetLastError(), "laplace"); *width = floor(*width * 2); // TODO *height = floor(*height * 2); // TODO return src_big; }
b692f35c941abd8ad9f3a66030e442c069c4803f.cu
#include <stdio.h> // CUDA includes #include <cutil.h> #include <cutil_math.h> // own stuff #include "cuda_wrappers.h" // kernel implementations #include "k_grayscale.cu" #include "k_weightmap.cu" #include "k_gaussian.cu" #include "k_laplace.cu" #include "k_combine.cu" #include "k_collapse.cu" #include "k_copy.cu" #include "k_linear.cu" #include "k_simpleEnfuse.cu" #include "cuda_wrappers.h" #include "filter_wrappers.h" /* * GLOBAL VARIABLES */ // configure the grid layout const int blocksize_x = 8; const int blocksize_y = 8; // cuda channel descriptor cudaChannelFormatDesc uchar4tex = cudaCreateChannelDesc<uchar4>(); /* * Upload the given RGB structure given by a uchar4 pointer into a cudaArray in * the GPU's memory and return a pointer to the array on the GPU's memory. */ cudaArray* uploadToArray(const uchar4 *source, int width, int height) { return cuda_uploadToArray(&uchar4tex, width, height, source); } float4 *genSimpleEnfuse(float4 *target, cudaArray* source, float *weight, int width, int height) { int memSize_weight = width * height * sizeof(float); // make sources available as texture cuda_bindTexture(&uchar4tex, source, "en_tex_image"); CUDA_SAFE_CALL(cudaBindTexture(0, en_tex_image_weight, weight, memSize_weight)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel simpleEnfuse <<< grid, threads >>>(target, width, height); printCudaError(stderr, cudaGetLastError(), "simpleEnfuse"); // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(en_tex_image)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); CUDA_SAFE_CALL(cudaUnbindTexture(en_tex_image_weight)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } float4 *genAppliedWeightmap(cudaArray* source, float *weight, int width, int height) { float4 *target; int memSize_target = width * height * 
sizeof(float4); int memSize_weight = width * height * sizeof(float); // allocate graphics memory for target CUDA_SAFE_CALL(cudaMalloc((void**) &target, memSize_target)); printCudaError(stderr, cudaGetLastError(), "cudaMalloc"); // make sources available as texture cuda_bindTexture(&uchar4tex, source, "en_tex_image"); CUDA_SAFE_CALL(cudaBindTexture(0, en_tex_image_weight, weight, memSize_weight)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel applyWeightmap <<< grid, threads >>>(target, width, height); printCudaError(stderr, cudaGetLastError(), "applyWeightmap"); // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(en_tex_image)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); CUDA_SAFE_CALL(cudaUnbindTexture(en_tex_image_weight)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } float* genGrayscale(const cudaArray *source, int width, int height) { float *target = cuda_malloc<float>(width, height); cuda_bindTexture(&uchar4tex, source, "g_tex_image"); // kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); grayscale <<< grid, threads >>>(target, width, height); printCudaError(stderr, cudaGetLastError(), "grayscale"); cuda_unbindTexture("g_tex_image"); return target; } float *genWeightmap(const cudaArray *image_source, const float *gray_source, int width, int height) { float *target = cuda_malloc<float>(width, height); cuda_bindTexture(&uchar4tex, image_source, "w_tex_image"); cuda_bindTexture<float>(gray_source, width, height, "w_tex_gray"); // kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); weightmap <<< grid, threads >>>(target, width, height); printCudaError(stderr, cudaGetLastError(), "weightmap"); 
cuda_unbindTexture("w_tex_image"); cuda_unbindTexture("w_tex_gray"); return target; } float4 *genGaussDown(float4 *source, int *width, int *height) { float4 *target; int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO int target_memSize = target_width * target_height * sizeof(float4); int source_memSize = (*width) * (*height) * sizeof(float4); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(cudaMalloc((void**) &target, target_memSize)); printCudaError(stderr, cudaGetLastError(), "cudaMalloc"); // make sources available as texture CUDA_SAFE_CALL(cudaBindTexture(0, ga_tex_image, source, source_memSize)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(target_width/blocksize_x) + 1,floor(target_height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel gaussDown <<< grid, threads >>>(target, target_width, target_height); printCudaError(stderr, cudaGetLastError(), "gauss"); // update new image dimensions *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(ga_tex_image)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } float4 *toFloat4(cudaArray *source, int width, int height) { float4 *target; // allocate graphics memory for target int memSize_target = width * height * sizeof(float4); CUDA_SAFE_CALL(cudaMalloc((void**) &target, memSize_target)); printCudaError(stderr, cudaGetLastError(), "cudaMalloc"); // make sources available as texture CUDA_SAFE_CALL(cudaBindTextureToArray(tf_tex_source, source)); printCudaError(stderr, cudaGetLastError(), "cudaBindTextureToArray"); // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel copyUcharToFloat <<< grid, threads >>>(target, width, height); printCudaError(stderr, 
cudaGetLastError(), "copyUcharToFloat"); // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(tf_tex_source)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } float *genGaussDown_f(float *source, int *width, int *height) { float *target; int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO int target_memSize = target_width * target_height * sizeof(float); int source_memSize = (*width) * (*height) * sizeof(float); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(cudaMalloc((void**) &target, target_memSize)); printCudaError(stderr, cudaGetLastError(), "cudaMalloc"); // make sources available as texture CUDA_SAFE_CALL(cudaBindTexture(0, ga_tex_image_f, source, source_memSize)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(target_width/blocksize_x) + 1,floor(target_height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel gaussDown_f <<< grid, threads >>>(target, target_width, target_height); printCudaError(stderr, cudaGetLastError(), "gaussDown_f"); // update new image dimensions *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(ga_tex_image)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } float *genLinearDown_f(float *source, int *width, int *height) { float *target; int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO int target_memSize = target_width * target_height * sizeof(float); int source_memSize = (*width) * (*height) * sizeof(float); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(cudaMalloc((void**) &target, target_memSize)); printCudaError(stderr, cudaGetLastError(), "cudaMalloc"); // make sources available as texture CUDA_SAFE_CALL(cudaBindTexture(0, dummy, source, source_memSize)); 
printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(target_width/blocksize_x) + 1,floor(target_height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel linearDown_f <<< grid, threads >>>(target, target_width, target_height); printCudaError(stderr, cudaGetLastError(), "linearDown_f"); // update new image dimensions *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(dummy)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } float4 *genGaussUp(float4 *source, int *width, int *height) { float4 *target; int target_width = floor(*width * 2); // TODO int target_height = floor(*height * 2); // TODO int target_memSize = target_width * target_height * sizeof(float4); int source_memSize = (*width) * (*height) * sizeof(float4); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(cudaMalloc((void**) &target, target_memSize)); printCudaError(stderr, cudaGetLastError(), "cudaMalloc"); // make sources available as texture CUDA_SAFE_CALL(cudaBindTexture(0, ga_tex_image, source, source_memSize)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel gaussUp <<< grid, threads >>>(target, *width, *height); printCudaError(stderr, cudaGetLastError(), "gauss"); // update new image dimensions *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(ga_tex_image)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } float4 *genLaplace(float4 *source_down, float4 *source_up, int *width, int *height) { float4 *target; int memSize = (*width) * (*height) * sizeof(float4); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target 
CUDA_SAFE_CALL(cudaMalloc((void**) &target, memSize)); printCudaError(stderr, cudaGetLastError(), "cudaMalloc"); // make sources available as texture CUDA_SAFE_CALL(cudaBindTexture(0, la_tex_image_down, source_down, memSize)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); CUDA_SAFE_CALL(cudaBindTexture(0, la_tex_image_up, source_up, memSize)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel laplace <<< grid, threads >>>(target, *width, *height); printCudaError(stderr, cudaGetLastError(), "laplace"); // FIXME // update new image dimensions int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(la_tex_image_up)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); CUDA_SAFE_CALL(cudaUnbindTexture(la_tex_image_down)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } float4 *genWeightedLaplace(float4 *source_laplace, float *source_weight, int *width, int *height) { float4 *target; int memSize_laplace = (*width) * (*height) * sizeof(float4); int memSize_weight = (*width) * (*height) * sizeof(float); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(cudaMalloc((void**) &target, memSize_laplace)); printCudaError(stderr, cudaGetLastError(), "cudaMalloc"); // make sources available as texture CUDA_SAFE_CALL(cudaBindTexture(0, la_tex_image_laplace, source_laplace, memSize_laplace)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); CUDA_SAFE_CALL(cudaBindTexture(0, la_tex_image_weight, source_weight, memSize_weight)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 
1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel weightedLaplace <<< grid, threads >>>(target, *width, *height); printCudaError(stderr, cudaGetLastError(), "weightedLaplace"); // FIXME // update new image dimensions int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(la_tex_image_up)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); CUDA_SAFE_CALL(cudaUnbindTexture(la_tex_image_down)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } void normWeightmaps(float *map_1, float *map_2, int width, int height) { // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel normalizeWeightmaps <<< grid, threads >>>(map_1, map_2, width, height); printCudaError(stderr, cudaGetLastError(), "normalizeWeightmaps"); } void normWeightmaps(float **maps, int numMaps, int width, int height) { // sum up all given weightmaps to the target float *target = cuda_memcpy<float>(maps[0], width, height); for (int i = 1; i < numMaps; i++) { // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel addToTarget <<< grid, threads >>>(target, maps[i], width, height); printCudaError(stderr, cudaGetLastError(), "addToTarget"); } // normalize all given weightmaps with the calculated divisors cuda_bindTexture<float>(target, width, height, "w_tex_divisor"); for (int i = 0; i < numMaps; i++) { // prepare kernel execution dim3 grid(floor(width/blocksize_x) + 1,floor(height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel normalizeWeightmap <<< grid, threads >>>(maps[i], width, height); printCudaError(stderr, cudaGetLastError(), "normalizeWeightmap"); } 
cuda_unbindTexture("w_tex_divisor"); cudaFree(target); } float4 *genAdded(float4* source_1, float4 *source_2, int *width, int *height) { float4 *target; int memSize = (*width) * (*height) * sizeof(float4); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(cudaMalloc((void**) &target, memSize)); printCudaError(stderr, cudaGetLastError(), "cudaMalloc"); // make sources available as texture CUDA_SAFE_CALL(cudaBindTexture(0, a_tex_image_1, source_1, memSize)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); CUDA_SAFE_CALL(cudaBindTexture(0, a_tex_image_2, source_2, memSize)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel add <<< grid, threads >>>(target, *width, *height); printCudaError(stderr, cudaGetLastError(), "laplace"); // FIXME // update new image dimensions int target_width = floor(*width / 2); // TODO int target_height = floor(*height / 2); // TODO *width = target_width; *height = target_height; // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(a_tex_image_1)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); CUDA_SAFE_CALL(cudaUnbindTexture(a_tex_image_2)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } float *genAdded(float* source_1, float *source_2, int *width, int *height) { float *target; int memSize = (*width) * (*height) * sizeof(float); // printf("w: %i, h: %i\n", *width, *height); // allocate graphics memory for target CUDA_SAFE_CALL(cudaMalloc((void**) &target, memSize)); printCudaError(stderr, cudaGetLastError(), "cudaMalloc"); // make sources available as texture CUDA_SAFE_CALL(cudaBindTexture(0, af_tex_image_1, source_1, memSize)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); CUDA_SAFE_CALL(cudaBindTexture(0, af_tex_image_2, source_2, memSize)); 
printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel add_f <<< grid, threads >>>(target, *width, *height); printCudaError(stderr, cudaGetLastError(), "laplace"); // cleanup CUDA_SAFE_CALL(cudaUnbindTexture(af_tex_image_1)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); CUDA_SAFE_CALL(cudaUnbindTexture(af_tex_image_2)); printCudaError(stderr, cudaGetLastError(), "cudaUnbindTexture"); return target; } float4 *genCollapsed(float4 *src_big, float4 *src_small, int *width, int *height) { int memSize = (*width) * (*height) * sizeof(float4); // make sources available as texture CUDA_SAFE_CALL(cudaBindTexture(0, c_tex_image, src_small, memSize)); printCudaError(stderr, cudaGetLastError(), "cudaBindTexture"); // prepare kernel execution dim3 grid(floor(*width/blocksize_x) + 1,floor(*height/blocksize_y) + 1); dim3 threads(blocksize_x, blocksize_y); // execute kernel collapse <<< grid, threads >>>(src_big, *width, *height); printCudaError(stderr, cudaGetLastError(), "laplace"); *width = floor(*width * 2); // TODO *height = floor(*height * 2); // TODO return src_big; }
68b6dc5182af7a5400ee71d582b868c0a22a68ed.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
68b6dc5182af7a5400ee71d582b868c0a22a68ed.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
3bd159facc10c5b9b6799845aef622598d6a654f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <iostream> #include <hip/hip_runtime.h> #include <rocblas.h> #include <time.h> // 3*4 // // a11 a12 a13 a14 // a21 a22 a23 a24 // a31 a32 a33 a34 // // a11 a12 a13 a14 a21 a22 a23 a24 a31 a32 a33 a34 // // // cuBLAS // // a11 a21 a31 a12 a22 a32 a13 a23 a33 a14 a24 a34 // . // // (,column) // cuBLAS (, row) . // #define IDX2C(i,j,Id) (((j)*(Id))+(i)) // j -> row, i -> column using namespace std; bool ChoseGpuAvailable(int n) { int devicesCount; hipGetDeviceCount(&devicesCount); cout<<"devicesCount : "<<devicesCount<<endl; for(int i = 0 ; i < devicesCount ; i++) { hipDeviceProp_t deviceProperties; hipGetDeviceProperties(&deviceProperties,i); cout<<"----- device "<<i<<" -----"<<endl; cout<<"device name : "<<deviceProperties.name<<endl; cout<<"maxThreadsPerBlock : "<<deviceProperties.maxThreadsPerBlock<<endl; cout<<"warpSize : "<<deviceProperties.warpSize<<endl; } if(n > devicesCount && n < 0) return false; else { hipSetDevice(n); return true; } } int main(int argc, char** argv) { unsigned int m = 6; //row unsigned int n = 5; //column float *matrix1, *vector1, *vector2; clock_t t; int host2device_time, device2host_time,GPU_time; matrix1 = new float[m*n]; vector1 = new float[n]; vector2 = new float[m*3]; hipError_t cudaStat; hipblasStatus_t stat; hipblasHandle_t handle; ChoseGpuAvailable(1); // int ind = 11; for(int x = 0 ; x < n ; x++) { for(int y = 0 ; y < m ; y++) { matrix1[IDX2C(y,x,m)] = (float)ind++; } } for(int y = 0 ; y < n ; y++) vector1[y] = 1.0f; for(int y = 0 ; y < m*3 ; y++) vector2[y] = 0.0f; cout<<"maxtrix1"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix1[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<"[vector1]^T"<<endl; for(int y = 0 ; y < n ; y++) cout<<vector1[y]<<" "; cout<<endl; cout<<"[vector2]^T"<<endl; for(int y = 0 ; y < m*3 ; y++) cout<<vector2[y]<<" "; cout<<endl; //cuda float *d_matrix1, *d_vector1, 
*d_vector2; hipMalloc(&d_matrix1,n*m*sizeof(float)); hipMalloc(&d_vector1,n*sizeof(float)); hipMalloc(&d_vector2,m*3*sizeof(float)); // memory -> cuda memory t = clock(); hipblasCreate(&handle); hipblasSetMatrix(m,n,sizeof(float),matrix1,m,d_matrix1,m); hipblasSetVector(n,sizeof(float),vector1,1,d_vector1,1); hipblasSetVector(m*3,sizeof(float),vector2,1,d_vector2,1); host2device_time = clock()-t; // ( ) float al=1.0f; float bet=1.0f; t = clock(); //stat = hipblasSgemv(handle, HIPBLAS_OP_N,m,n, &al, d_matrix1,m, d_vector1,1, &bet, d_vector2,1); // // (0,0) //3,4 . // //CUBLAS_OP_N //CUBLAS_OP_T transpose. // // stat = hipblasSgemv(handle, HIPBLAS_OP_T,m-2,n-2, &al, d_matrix1,m, d_vector1,1, &bet, d_vector2+m,1); GPU_time = clock() - t; //cuda memory -> memory t= clock(); hipblasGetMatrix(m,n,sizeof(float),d_matrix1,m,matrix1,m); hipblasGetVector(n,sizeof(float),d_vector1,1,vector1,1); hipblasGetVector(m*3,sizeof(float),d_vector2,1,vector2,1); device2host_time = clock() - t; // cout<<"maxtrix1"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix1[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<"[vector1]^T"<<endl; for(int y = 0 ; y < n ; y++) cout<<vector1[y]<<" "; cout<<endl; cout<<"[vector2]^T"<<endl; for(int y = 0 ; y < m*3 ; y++) cout<<vector2[y]<<" "; cout<<endl; cout<<"host to device time : "<<host2device_time<<endl; cout<<"GPU time : "<<GPU_time<<endl; cout<<"device to host time : "<<device2host_time<<endl; //cuda hipFree(d_matrix1); hipFree(d_vector1); hipFree(d_vector2); hipblasDestroy(handle); delete matrix1; delete vector1; delete vector2; return 0; }
3bd159facc10c5b9b6799845aef622598d6a654f.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #include <cuda_runtime.h> #include <cublas_v2.h> #include <time.h> //보통 행렬은 3*4 행렬이면 // // a11 a12 a13 a14 // a21 a22 a23 a24 // a31 a32 a33 a34 // // a11 a12 a13 a14 a21 a22 a23 a24 a31 a32 a33 a34 // 위와 같이 저장하지만 // // cuBLAS에서는 // // a11 a21 a31 a12 a22 a32 a13 a23 a33 a14 a24 a34 // 위와 같이 저장된다. // // 보통 열(세로,column)의 수 기준으로 저장하지만 // cuBLAS에서는 행(가로, row)의 수 기준으로 저장한다. // #define IDX2C(i,j,Id) (((j)*(Id))+(i)) // j -> row, i -> column using namespace std; bool ChoseGpuAvailable(int n) { int devicesCount; cudaGetDeviceCount(&devicesCount); cout<<"devicesCount : "<<devicesCount<<endl; for(int i = 0 ; i < devicesCount ; i++) { cudaDeviceProp deviceProperties; cudaGetDeviceProperties(&deviceProperties,i); cout<<"----- device "<<i<<" -----"<<endl; cout<<"device name : "<<deviceProperties.name<<endl; cout<<"maxThreadsPerBlock : "<<deviceProperties.maxThreadsPerBlock<<endl; cout<<"warpSize : "<<deviceProperties.warpSize<<endl; } if(n > devicesCount && n < 0) return false; else { cudaSetDevice(n); return true; } } int main(int argc, char** argv) { unsigned int m = 6; //row unsigned int n = 5; //column float *matrix1, *vector1, *vector2; clock_t t; int host2device_time, device2host_time,GPU_time; matrix1 = new float[m*n]; vector1 = new float[n]; vector2 = new float[m*3]; cudaError_t cudaStat; cublasStatus_t stat; cublasHandle_t handle; ChoseGpuAvailable(1); //데이터 초기화 int ind = 11; for(int x = 0 ; x < n ; x++) { for(int y = 0 ; y < m ; y++) { matrix1[IDX2C(y,x,m)] = (float)ind++; } } for(int y = 0 ; y < n ; y++) vector1[y] = 1.0f; for(int y = 0 ; y < m*3 ; y++) vector2[y] = 0.0f; cout<<"maxtrix1"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix1[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<"[vector1]^T"<<endl; for(int y = 0 ; y < n ; y++) cout<<vector1[y]<<" "; cout<<endl; cout<<"[vector2]^T"<<endl; for(int y = 0 ; y < m*3 ; y++) cout<<vector2[y]<<" "; cout<<endl; //cuda 메모리 할당 float 
*d_matrix1, *d_vector1, *d_vector2; cudaMalloc(&d_matrix1,n*m*sizeof(float)); cudaMalloc(&d_vector1,n*sizeof(float)); cudaMalloc(&d_vector2,m*3*sizeof(float)); // memory -> cuda memory t = clock(); cublasCreate(&handle); cublasSetMatrix(m,n,sizeof(float),matrix1,m,d_matrix1,m); cublasSetVector(n,sizeof(float),vector1,1,d_vector1,1); cublasSetVector(m*3,sizeof(float),vector2,1,d_vector2,1); host2device_time = clock()-t; // 연산 (커널 실행) float al=1.0f; float bet=1.0f; t = clock(); //stat = cublasSgemv(handle, CUBLAS_OP_N,m,n, &al, d_matrix1,m, d_vector1,1, &bet, d_vector2,1); // //먼저 주소 값의 바꿔주면서 행렬의 (0,0)의 위치를 바꿔주고 //3,4번째 파라메터로 행렬의 최종 크기를 정해준다. // //CUBLAS_OP_N은 아무것도 안한것 //CUBLAS_OP_T는 transpose한것이다. // // stat = cublasSgemv(handle, CUBLAS_OP_T,m-2,n-2, &al, d_matrix1,m, d_vector1,1, &bet, d_vector2+m,1); GPU_time = clock() - t; //cuda memory -> memory t= clock(); cublasGetMatrix(m,n,sizeof(float),d_matrix1,m,matrix1,m); cublasGetVector(n,sizeof(float),d_vector1,1,vector1,1); cublasGetVector(m*3,sizeof(float),d_vector2,1,vector2,1); device2host_time = clock() - t; //결과 확인 cout<<"maxtrix1"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix1[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<"[vector1]^T"<<endl; for(int y = 0 ; y < n ; y++) cout<<vector1[y]<<" "; cout<<endl; cout<<"[vector2]^T"<<endl; for(int y = 0 ; y < m*3 ; y++) cout<<vector2[y]<<" "; cout<<endl; cout<<"host to device time : "<<host2device_time<<endl; cout<<"GPU time : "<<GPU_time<<endl; cout<<"device to host time : "<<device2host_time<<endl; //cuda 메모리 해제 cudaFree(d_matrix1); cudaFree(d_vector1); cudaFree(d_vector2); cublasDestroy(handle); delete matrix1; delete vector1; delete vector2; return 0; }
a555e34aa8208abed57505c1d86d4f300516b911.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ /* * This sample implements Mersenne Twister random number generator * and Cartesian Box-Muller transformation on the GPU. * See supplied whitepaper for more explanations. */ #include "MersenneTwister.h" /////////////////////////////////////////////////////////////////////////////// // Common host and device function /////////////////////////////////////////////////////////////////////////////// //ceil(a / b) extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } //floor(a / b) extern "C" int iDivDown(int a, int b){ return a / b; } //Align a to nearest higher multiple of b extern "C" int iAlignUp(int a, int b){ return ((a % b) != 0) ? 
(a - a % b + b) : a; } //Align a to nearest lower multiple of b extern "C" int iAlignDown(int a, int b){ return a - a % b; } /////////////////////////////////////////////////////////////////////////////// // Data configuration /////////////////////////////////////////////////////////////////////////////// static int PATH_N_GAUSS; static int PATH_N_UNIF; static int N_PER_RNG_GAUSS; static int N_PER_RNG_UNIF; static int RAND_N_GAUSS; static int RAND_N_UNIF; __device__ mt_struct_stripped ds_MT[MT_RNG_COUNT]; static mt_struct_stripped h_MT[MT_RNG_COUNT]; __device__ unsigned int d_mtstatus[MT_RNG_COUNT][MT_NN]; // fields for the MT random number generator //__device__ float * dev_rndunif_field; //__device__ float * dev_rndgauss_field; //Load twister configurations void loadMTGPU(const char *fname){ FILE *fd = fopen(fname, "rb"); if(!fd){ printf("initMTGPU(): failed to open %s\n", fname); printf("FAILED\n"); exit(0); } if( !fread(h_MT, sizeof(h_MT), 1, fd) ){ printf("initMTGPU(): failed to load %s\n", fname); printf("FAILED\n"); exit(0); } fclose(fd); } //Initialize/seed twister for current GPU context void seedMTGPU(){ int i; //Need to be thread-safe mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT * sizeof(mt_struct_stripped)); /* initialize poor rng: */ srand ( time(NULL) ); /* initialize MT rng seeds */ for(i = 0; i < MT_RNG_COUNT; i++){ MT[i] = h_MT[i]; MT[i].seed = (unsigned int) rand(); } CUDA_SAFE_CALL( hipMemcpyToSymbol(ds_MT, MT, sizeof(h_MT)) ); free(MT); } //Save twister for current GPU context void saveMTGPU(const char *fname){ FILE *fd = fopen(fname, "w"); if(!fd){ printf("saveMTGPU(): failed to open %s\n", fname); printf("FAILED\n"); exit(0); } fwrite(h_MT, sizeof(h_MT), 1, fd); fclose(fd); } //////////////////////////////////////////////////////////////////////////////// // Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random. // For coalesced global writes MT_RNG_COUNT should be a multiple of warp size. 
// Initial states for each generator are the same, since the states are // initialized from the global seed. In order to improve distribution properties // on small NPerRng supply dedicated (local) seed to each twister. // The local seeds, in their turn, can be extracted from global seed // by means of any simple random number generator, like LCG. //////////////////////////////////////////////////////////////////////////////// __global__ void RandomGPU( float *d_Random, int NPerRng, int initialized ){ const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; int iState, iState1, iStateM, iOut; unsigned int mti, mti1, mtiM, x; unsigned int mt[MT_NN]; for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){ //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; if(!initialized){ // initialize seed and construct status mt must be initialized from host before mt[0] = ds_MT[iRng].seed; for(iState = 1; iState < MT_NN; iState++) mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK; } else{ for(iState = 0; iState < MT_NN; iState++) mt[iState] = d_mtstatus[iRng][iState]; } iState = 0; mti1 = mt[0]; for(iOut = 0; iOut < NPerRng; iOut++){ //iState1 = (iState + 1) % MT_NN //iStateM = (iState + MT_MM) % MT_NN iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //Convert to (0, 1] float and write to global memory d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f; } // save status of mt ds_MT[iRng].seed = mt[0]; for(iState = 0; iState < MT_NN; iState++) d_mtstatus[iRng][iState] = mt[iState]; } } //////////////////////////////////////////////////////////////////////////////// // Transform each of MT_RNG_COUNT lanes of NPerRng uniformly distributed // random samples, produced by RandomGPU(), to normally distributed lanes // using Cartesian form of Box-Muller transformation. // NPerRng must be even. //////////////////////////////////////////////////////////////////////////////// #define PIf 3.14159265358979f __device__ inline void BoxMuller(float& u1, float& u2){ float r = sqrtf(-2.0f * logf(u1)); float phi = 2 * PIf * u2; u1 = r * __cosf(phi); u2 = r * __sinf(phi); } __global__ void BoxMullerGPU(float *d_Random, int NPerRng){ const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N) for(int iOut = 0; iOut < NPerRng; iOut += 2) BoxMuller( d_Random[iRng + (iOut + 0) * MT_RNG_COUNT], d_Random[iRng + (iOut + 1) * MT_RNG_COUNT] ); } extern "C" void init_MT(int n_gaussnumbers, int n_unifnumbers){ hipError_t cudaerr; //determine sizes for gauss numbers printf("Initializing MT random number generator...\n"); PATH_N_GAUSS = n_gaussnumbers; N_PER_RNG_GAUSS = iAlignUp(iDivUp(PATH_N_GAUSS, MT_RNG_COUNT), 2); RAND_N_GAUSS = MT_RNG_COUNT * N_PER_RNG_GAUSS; printf("No. of gauss random numbers: %d\n", RAND_N_GAUSS ); //determine sizes for unif. numbers PATH_N_UNIF = n_unifnumbers; N_PER_RNG_UNIF = iAlignUp(iDivUp(PATH_N_UNIF, MT_RNG_COUNT), 2); RAND_N_UNIF = MT_RNG_COUNT * N_PER_RNG_UNIF; printf("No. of unif. dist. 
random numbers: %d\n", RAND_N_UNIF ); // load and initialize twister configurations on device // seed the twisters const char *dat_path = "MersenneTwister.dat"; printf("Loading GPU twisters configurations from file %s...\n", dat_path); loadMTGPU(dat_path); seedMTGPU(); //allocate fields for random numbers printf("Allocating device memory for random numbers...\n"); CUDA_SAFE_CALL(hipMalloc((void **)&dev_rndgauss_field, RAND_N_GAUSS * sizeof(float)) ); CUDA_SAFE_CALL(hipMalloc((void **)&dev_rndunif_field, RAND_N_UNIF * sizeof(float))); // CREATE FIRST RANDOM NUMBERS /* update the random field for gauss numbers -> BoxMuller afterwards*/ hipDeviceSynchronize(); hipLaunchKernelGGL(( RandomGPU), dim3(32), dim3(128), 0, 0, dev_rndgauss_field, N_PER_RNG_GAUSS,0); hipLaunchKernelGGL(( BoxMullerGPU), dim3(32), dim3(128), 0, 0, dev_rndgauss_field, N_PER_RNG_GAUSS); /* update the random field for unif. dist. numbers*/ hipDeviceSynchronize(); hipLaunchKernelGGL(( RandomGPU), dim3(32), dim3(128), 0, 0, dev_rndunif_field, N_PER_RNG_UNIF,0); hipDeviceSynchronize(); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } } extern "C" void update_MT(){ /* update the random field for gauss numbers -> BoxMuller afterwards*/ hipDeviceSynchronize(); hipLaunchKernelGGL(( RandomGPU), dim3(32), dim3(128), 0, 0, dev_rndgauss_field, N_PER_RNG_GAUSS,1); hipLaunchKernelGGL(( BoxMullerGPU), dim3(32), dim3(128), 0, 0, dev_rndgauss_field, N_PER_RNG_GAUSS); hipDeviceSynchronize(); /* update the random field for unif. dist. 
numbers*/ hipLaunchKernelGGL(( RandomGPU), dim3(32), dim3(128), 0, 0, dev_rndunif_field, N_PER_RNG_UNIF, 1); hipDeviceSynchronize(); /* float * blub = (float*) malloc(RAND_N_UNIF*sizeof(float)); printf("%d \n", RAND_N_UNIF); CUDA_SAFE_CALL(hipMemcpy(blub, dev_rndunif_field, (size_t)(RAND_N_UNIF*sizeof(float)), hipMemcpyDeviceToHost)); for(int k=0; k<4; k++){ for(int j=VOLUME/2-10; j<VOLUME/2; j++){ printf("%f, ", blub[4*j+k]); } } printf("\n\n"); free(blub); */ } extern "C" void finalize_MT(){ hipFree(dev_rndgauss_field); hipFree(dev_rndunif_field); }
a555e34aa8208abed57505c1d86d4f300516b911.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ /* * This sample implements Mersenne Twister random number generator * and Cartesian Box-Muller transformation on the GPU. * See supplied whitepaper for more explanations. */ #include "MersenneTwister.h" /////////////////////////////////////////////////////////////////////////////// // Common host and device function /////////////////////////////////////////////////////////////////////////////// //ceil(a / b) extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } //floor(a / b) extern "C" int iDivDown(int a, int b){ return a / b; } //Align a to nearest higher multiple of b extern "C" int iAlignUp(int a, int b){ return ((a % b) != 0) ? 
(a - a % b + b) : a; } //Align a to nearest lower multiple of b extern "C" int iAlignDown(int a, int b){ return a - a % b; } /////////////////////////////////////////////////////////////////////////////// // Data configuration /////////////////////////////////////////////////////////////////////////////// static int PATH_N_GAUSS; static int PATH_N_UNIF; static int N_PER_RNG_GAUSS; static int N_PER_RNG_UNIF; static int RAND_N_GAUSS; static int RAND_N_UNIF; __device__ mt_struct_stripped ds_MT[MT_RNG_COUNT]; static mt_struct_stripped h_MT[MT_RNG_COUNT]; __device__ unsigned int d_mtstatus[MT_RNG_COUNT][MT_NN]; // fields for the MT random number generator //__device__ float * dev_rndunif_field; //__device__ float * dev_rndgauss_field; //Load twister configurations void loadMTGPU(const char *fname){ FILE *fd = fopen(fname, "rb"); if(!fd){ printf("initMTGPU(): failed to open %s\n", fname); printf("FAILED\n"); exit(0); } if( !fread(h_MT, sizeof(h_MT), 1, fd) ){ printf("initMTGPU(): failed to load %s\n", fname); printf("FAILED\n"); exit(0); } fclose(fd); } //Initialize/seed twister for current GPU context void seedMTGPU(){ int i; //Need to be thread-safe mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT * sizeof(mt_struct_stripped)); /* initialize poor rng: */ srand ( time(NULL) ); /* initialize MT rng seeds */ for(i = 0; i < MT_RNG_COUNT; i++){ MT[i] = h_MT[i]; MT[i].seed = (unsigned int) rand(); } CUDA_SAFE_CALL( cudaMemcpyToSymbol(ds_MT, MT, sizeof(h_MT)) ); free(MT); } //Save twister for current GPU context void saveMTGPU(const char *fname){ FILE *fd = fopen(fname, "w"); if(!fd){ printf("saveMTGPU(): failed to open %s\n", fname); printf("FAILED\n"); exit(0); } fwrite(h_MT, sizeof(h_MT), 1, fd); fclose(fd); } //////////////////////////////////////////////////////////////////////////////// // Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random. // For coalesced global writes MT_RNG_COUNT should be a multiple of warp size. 
// Initial states for each generator are the same, since the states are // initialized from the global seed. In order to improve distribution properties // on small NPerRng supply dedicated (local) seed to each twister. // The local seeds, in their turn, can be extracted from global seed // by means of any simple random number generator, like LCG. //////////////////////////////////////////////////////////////////////////////// __global__ void RandomGPU( float *d_Random, int NPerRng, int initialized ){ const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; int iState, iState1, iStateM, iOut; unsigned int mti, mti1, mtiM, x; unsigned int mt[MT_NN]; for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){ //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; if(!initialized){ // initialize seed and construct status mt must be initialized from host before mt[0] = ds_MT[iRng].seed; for(iState = 1; iState < MT_NN; iState++) mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK; } else{ for(iState = 0; iState < MT_NN; iState++) mt[iState] = d_mtstatus[iRng][iState]; } iState = 0; mti1 = mt[0]; for(iOut = 0; iOut < NPerRng; iOut++){ //iState1 = (iState + 1) % MT_NN //iStateM = (iState + MT_MM) % MT_NN iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //Convert to (0, 1] float and write to global memory d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f; } // save status of mt ds_MT[iRng].seed = mt[0]; for(iState = 0; iState < MT_NN; iState++) d_mtstatus[iRng][iState] = mt[iState]; } } //////////////////////////////////////////////////////////////////////////////// // Transform each of MT_RNG_COUNT lanes of NPerRng uniformly distributed // random samples, produced by RandomGPU(), to normally distributed lanes // using Cartesian form of Box-Muller transformation. // NPerRng must be even. //////////////////////////////////////////////////////////////////////////////// #define PIf 3.14159265358979f __device__ inline void BoxMuller(float& u1, float& u2){ float r = sqrtf(-2.0f * logf(u1)); float phi = 2 * PIf * u2; u1 = r * __cosf(phi); u2 = r * __sinf(phi); } __global__ void BoxMullerGPU(float *d_Random, int NPerRng){ const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N) for(int iOut = 0; iOut < NPerRng; iOut += 2) BoxMuller( d_Random[iRng + (iOut + 0) * MT_RNG_COUNT], d_Random[iRng + (iOut + 1) * MT_RNG_COUNT] ); } extern "C" void init_MT(int n_gaussnumbers, int n_unifnumbers){ cudaError_t cudaerr; //determine sizes for gauss numbers printf("Initializing MT random number generator...\n"); PATH_N_GAUSS = n_gaussnumbers; N_PER_RNG_GAUSS = iAlignUp(iDivUp(PATH_N_GAUSS, MT_RNG_COUNT), 2); RAND_N_GAUSS = MT_RNG_COUNT * N_PER_RNG_GAUSS; printf("No. of gauss random numbers: %d\n", RAND_N_GAUSS ); //determine sizes for unif. numbers PATH_N_UNIF = n_unifnumbers; N_PER_RNG_UNIF = iAlignUp(iDivUp(PATH_N_UNIF, MT_RNG_COUNT), 2); RAND_N_UNIF = MT_RNG_COUNT * N_PER_RNG_UNIF; printf("No. of unif. dist. 
random numbers: %d\n", RAND_N_UNIF ); // load and initialize twister configurations on device // seed the twisters const char *dat_path = "MersenneTwister.dat"; printf("Loading GPU twisters configurations from file %s...\n", dat_path); loadMTGPU(dat_path); seedMTGPU(); //allocate fields for random numbers printf("Allocating device memory for random numbers...\n"); CUDA_SAFE_CALL(cudaMalloc((void **)&dev_rndgauss_field, RAND_N_GAUSS * sizeof(float)) ); CUDA_SAFE_CALL(cudaMalloc((void **)&dev_rndunif_field, RAND_N_UNIF * sizeof(float))); // CREATE FIRST RANDOM NUMBERS /* update the random field for gauss numbers -> BoxMuller afterwards*/ cudaThreadSynchronize(); RandomGPU<<<32, 128>>>(dev_rndgauss_field, N_PER_RNG_GAUSS,0); BoxMullerGPU<<<32, 128>>>(dev_rndgauss_field, N_PER_RNG_GAUSS); /* update the random field for unif. dist. numbers*/ cudaThreadSynchronize(); RandomGPU<<<32, 128>>>(dev_rndunif_field, N_PER_RNG_UNIF,0); cudaThreadSynchronize(); cudaerr = cudaGetLastError(); if(cudaerr != cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } } extern "C" void update_MT(){ /* update the random field for gauss numbers -> BoxMuller afterwards*/ cudaThreadSynchronize(); RandomGPU<<<32, 128>>>(dev_rndgauss_field, N_PER_RNG_GAUSS,1); BoxMullerGPU<<<32, 128>>>(dev_rndgauss_field, N_PER_RNG_GAUSS); cudaThreadSynchronize(); /* update the random field for unif. dist. numbers*/ RandomGPU<<<32, 128>>>(dev_rndunif_field, N_PER_RNG_UNIF, 1); cudaThreadSynchronize(); /* float * blub = (float*) malloc(RAND_N_UNIF*sizeof(float)); printf("%d \n", RAND_N_UNIF); CUDA_SAFE_CALL(cudaMemcpy(blub, dev_rndunif_field, (size_t)(RAND_N_UNIF*sizeof(float)), cudaMemcpyDeviceToHost)); for(int k=0; k<4; k++){ for(int j=VOLUME/2-10; j<VOLUME/2; j++){ printf("%f, ", blub[4*j+k]); } } printf("\n\n"); free(blub); */ } extern "C" void finalize_MT(){ cudaFree(dev_rndgauss_field); cudaFree(dev_rndunif_field); }
e48f4593c25a977fe4a180362533f96b6b929d6c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "srad.h" #include <stdio.h> __global__ void srad_cuda_1( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, int cols, int rows, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float n, w, e, s, jc, g2, l, num, den, qsqr, c; //shared memory allocation __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float north[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float south[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float west[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory north[ty][tx] = J_cuda[index_n]; south[ty][tx] = J_cuda[index_s]; if ( by == 0 ){ north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx]; } else if ( by == gridDim.y - 1 ){ south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); west[ty][tx] = J_cuda[index_w]; east[ty][tx] = J_cuda[index_e]; if ( bx == 0 ){ west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty]; } else if ( bx == gridDim.x - 1 ){ east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); temp[ty][tx] = J_cuda[index]; __syncthreads(); jc = temp[ty][tx]; if ( ty == 0 && tx == 0 ){ //nw n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne n = 
north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 ){ //n n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == BLOCK_SIZE -1 ){ //e n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1){ //s n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == 0 ){ //w n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else{ //the data elements which are not on the borders n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc); l = ( n + s + w + e ) / jc; num = (0.5*g2) - ((1.0/16.0)*(l*l)) ; den = 1 + (.25*l); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c = 1.0 / (1.0+den) ; // saturate diffusion coefficent if (c < 0){temp_result[ty][tx] = 0;} else if (c > 1) {temp_result[ty][tx] = 1;} else {temp_result[ty][tx] = c;} __syncthreads(); C_cuda[index] = temp_result[ty][tx]; E_C[index] = e; W_C[index] = w; S_C[index] = s; N_C[index] = n; } __global__ void srad_cuda_2( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, int cols, int rows, float lambda, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_s = cols * BLOCK_SIZE * by + 
BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float cc, cn, cs, ce, cw, d_sum; //shared memory allocation __shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory temp[ty][tx] = J_cuda[index]; __syncthreads(); south_c[ty][tx] = C_cuda[index_s]; if ( by == gridDim.y - 1 ){ south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); east_c[ty][tx] = C_cuda[index_e]; if ( bx == gridDim.x - 1 ){ east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); c_cuda_temp[ty][tx] = C_cuda[index]; __syncthreads(); cc = c_cuda_temp[ty][tx]; if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se cn = cc; cs = south_c[ty][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( tx == BLOCK_SIZE -1 ){ //e cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( ty == BLOCK_SIZE -1){ //s cn = cc; cs = south_c[ty][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } else{ //the data elements which are not on the borders cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } // divergence (equ 58) d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index]; // image update (equ 61) c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum; __syncthreads(); J_cuda[index] = c_cuda_result[ty][tx]; }
e48f4593c25a977fe4a180362533f96b6b929d6c.cu
#include "srad.h" #include <stdio.h> __global__ void srad_cuda_1( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, int cols, int rows, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float n, w, e, s, jc, g2, l, num, den, qsqr, c; //shared memory allocation __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float north[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float south[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float west[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory north[ty][tx] = J_cuda[index_n]; south[ty][tx] = J_cuda[index_s]; if ( by == 0 ){ north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx]; } else if ( by == gridDim.y - 1 ){ south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); west[ty][tx] = J_cuda[index_w]; east[ty][tx] = J_cuda[index_e]; if ( bx == 0 ){ west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty]; } else if ( bx == gridDim.x - 1 ){ east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); temp[ty][tx] = J_cuda[index]; __syncthreads(); jc = temp[ty][tx]; if ( ty == 0 && tx == 0 ){ //nw n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - 
jc; } else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 ){ //n n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == BLOCK_SIZE -1 ){ //e n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1){ //s n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == 0 ){ //w n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else{ //the data elements which are not on the borders n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc); l = ( n + s + w + e ) / jc; num = (0.5*g2) - ((1.0/16.0)*(l*l)) ; den = 1 + (.25*l); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c = 1.0 / (1.0+den) ; // saturate diffusion coefficent if (c < 0){temp_result[ty][tx] = 0;} else if (c > 1) {temp_result[ty][tx] = 1;} else {temp_result[ty][tx] = c;} __syncthreads(); C_cuda[index] = temp_result[ty][tx]; E_C[index] = e; W_C[index] = w; S_C[index] = s; N_C[index] = n; } __global__ void srad_cuda_2( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, int cols, int rows, float lambda, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * 
bx + cols * ty + BLOCK_SIZE; float cc, cn, cs, ce, cw, d_sum; //shared memory allocation __shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory temp[ty][tx] = J_cuda[index]; __syncthreads(); south_c[ty][tx] = C_cuda[index_s]; if ( by == gridDim.y - 1 ){ south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); east_c[ty][tx] = C_cuda[index_e]; if ( bx == gridDim.x - 1 ){ east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); c_cuda_temp[ty][tx] = C_cuda[index]; __syncthreads(); cc = c_cuda_temp[ty][tx]; if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se cn = cc; cs = south_c[ty][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( tx == BLOCK_SIZE -1 ){ //e cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( ty == BLOCK_SIZE -1){ //s cn = cc; cs = south_c[ty][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } else{ //the data elements which are not on the borders cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } // divergence (equ 58) d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index]; // image update (equ 61) c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum; __syncthreads(); J_cuda[index] = c_cuda_result[ty][tx]; }
50656a70d6d247cf45aff345a0d2c46fc608806c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************************* * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
************************************************************************/ #include <algorithm> #include <roctracer/roctx.h> #include "core.h" #include "common_kernel.h" #include "copy_kernel.h" #include "enqueue.h" #include "reduce_kernel.h" /* HIERARCHY * * The data is split into CHUNKS, and each CHUNK is split into NUM_SUBCHUNKS * SUBCHUNKS, where each SUBCHUNK is processed independently. A SUBCHUNK is * split into numUnroll UNROLLS and each thread performs UNROLL_COUNT * single-data-element operations inside an UNROLL. As the name suggests, the * UNROLL_COUNT operations within an UNROLL are unrolled. */ // Number of threads used to perform copies, etc. Must be multiple of 32. // An additional thread is used to handle threadfences, so the CUDA blocks // have dimension NUM_THREADS+1. #define NUM_THREADS 256 // Each thread unrolls the innermost loop of the copy or reduction operations // to this many single-data-element instructions #define UNROLL_COUNT 8 #define UNROLL_SIZE (UNROLL_COUNT * NUM_THREADS) // To hide the latency associated with the synchronization between different // subchunks, we interleave the independent subchunks so that more data can be // transferred while the sync is in progress. This is the number of subchunks // that are active at the same time #define NUM_SUBCHUNKS 4 // if this is called with CHUNK, it means that we just finished pushing the data // of chunk CHUNK to the next GPU, so it can proceed with CHUNK // We add 1 to chunk so that the initial flag of 0 doesn't allow the non-root // GPUs to proceed before the flag is incremented from the upstream GPU. This // is called by one particular consumer warp and so we select the first thread // in the warp to set the flag. 
#define SIGNAL_NEW_DATA_AVAILABLE(chunk, subchunk) \ do { \ __threadfence_system(); \ args.NextNewDataAvailableFlag[0] = NUM_SUBCHUNKS*(chunk) + subchunk + 1; \ } while (0) // This is called by all producer threads, but only thread 0 spins on the flag, #define WAIT_FOR_NEW_DATA(chunk, subchunk) \ do { \ if (tid == 0) { \ Wait([=] { \ return ((volatile int *)args.ThisNewDataAvailableFlag)[0] >= \ NUM_SUBCHUNKS*(chunk) + subchunk + 1; \ }); \ } \ BAR(sync, 1, NUM_THREADS); \ } while (0) // If this is called with CHUNK, it means that this GPU has just finished // processing the chunk CHUNK and so the previous GPU can start with CHUNK + 1 #define SIGNAL_CHUNK_DONE(chunk, subchunk) \ do { \ args.PrevChunkDoneFlag[0] = NUM_SUBCHUNKS*(chunk) + subchunk + 1; \ } while (0) // This is called by all producer threads, but only thread 0 spins on the flag, // all threads synchronize after thread 0 is done spinning. #define WAIT_FOR_CHUNK(chunk, subchunk) \ do { \ if (tid == 0) { \ Wait([=] { \ return ((volatile int *)args.ThisChunkDoneFlag)[0] >= \ NUM_SUBCHUNKS*(chunk) + subchunk + 1 - NUM_SUBCHUNKS; \ }); \ } \ BAR(sync, 1, NUM_THREADS); \ } while (0) // This is called by all producer threads, but only thread 0 spins on the flag, // all threads synchronize after thread 0 is done spinning. 
#define WAIT_FOR_NEW_DATA_AND_CHUNK(chunk, subchunk) \ do { \ if (tid == 0) { \ Wait([=] { \ bool newDataAvailable = \ ((volatile int *)args.ThisNewDataAvailableFlag)[0] >= \ NUM_SUBCHUNKS*(chunk) + subchunk + 1; \ bool chunkDone = \ ((volatile int *)args.ThisChunkDoneFlag)[0] >= \ NUM_SUBCHUNKS*(chunk)+subchunk + 1 - NUM_SUBCHUNKS; \ return newDataAvailable && chunkDone; \ }); \ } \ BAR(sync, 1, NUM_THREADS); \ } while (0) __device__ inline void getSliceSizeAndOffset(int *size, int *offset, int slice, int numSlices, int numBigSlices, int numSmallSlices, int bigSliceN, int smallSliceN, int lastSliceN) { if (slice < numBigSlices) { *size = bigSliceN; *offset = slice * bigSliceN; } else { *size = (slice < numBigSlices + numSmallSlices) ? smallSliceN : ((slice == numSlices - 1) ? lastSliceN : 0); *offset = numBigSlices * bigSliceN + (slice - numBigSlices) * smallSliceN; } // if (threadIdx.x == 0) // printf("[size=%d] [offset=%d] slice=%d numSlices=%d " // "numBigSlices=%d numSmallSlices=%d bigSliceN=%d smallSliceN=%d " // "lastSliceN=%d\n", *size, *offset, slice, numSlices, numBigSlices, // numSmallSlices, bigSliceN, smallSliceN, lastSliceN); } template<typename T> struct ReduceKernelArgs { // general parameters int ThisId; int N; // some pre-computed sizes int SliceSize; int ChunkSize; int NumChunks; int BufferSliceStride; T ** ThisPtrToNextData; T ** PrevPtrToThisData; // local and remote data T * __restrict__ Output; const T * __restrict__ ThisData; volatile T * __restrict__ ThisBuffer; volatile T * __restrict__ NextBuffer; // local and remote flags volatile int * __restrict__ ThisNewDataAvailableFlag; volatile int * __restrict__ NextNewDataAvailableFlag; volatile int * __restrict__ ThisChunkDoneFlag; volatile int * __restrict__ PrevChunkDoneFlag; }; __shared__ volatile void * nextData; enum ReduceRole {BEGIN=0, MIDDLE=1, END=2}; template<int THREADS, int UNROLL, class FUNC, int ROLE, typename T> __global__ void ReduceKernel(const ReduceKernelArgs<T> args) { if 
(args.N == 0) return; int tid = threadIdx.x; // First wait for args.PrevPtrToThisOutput to become nullptr to ensure that // the previous GPU is done with a previous collective operation. if (tid == 0) { Wait([=] { return *((T * volatile *)args.PrevPtrToThisData) == nullptr; // Wait for previous processor to be done }); *((T * volatile *)args.PrevPtrToThisData) = (T*)args.ThisData; // Tell Previous I'm starting Wait([=] { return *((T * volatile *)args.ThisPtrToNextData) != nullptr; // Wait till I've been told next started }); } __syncthreads(); for (int chunk = 0; chunk < args.NumChunks; ++chunk) { // calculate slice size. for all chunks except (possibly) the last one, // this will just be args.SliceSize. For the last one, it may be smaller int bigSliceN = args.SliceSize; int smallSliceN = 0; int lastSliceN = 0; int numSlices = NUM_SUBCHUNKS; int numBigSlices = numSlices; int numSmallSlices = 0; // last chunk if ((chunk + 1 == args.NumChunks) && (args.N % args.ChunkSize > 0)) CalcLastChunk<THREADS, UNROLL, T>(&bigSliceN, &smallSliceN, &lastSliceN, &numSlices, &numBigSlices, &numSmallSlices, args.N, args.NumChunks, args.ChunkSize); // this offset is only applied to Data pointers, not to Buffer pointers, // since we only have one buffer per chunk int chunkOffset = chunk * args.ChunkSize; int offset; int sliceSize; if (tid < THREADS) { for(int s=0; s<NUM_SUBCHUNKS; ++s) { getSliceSizeAndOffset(&sliceSize, &offset, s, numSlices, numBigSlices, numSmallSlices, bigSliceN, smallSliceN, lastSliceN); if (ROLE == BEGIN) { WAIT_FOR_CHUNK(chunk, s); Copy<UNROLL, THREADS>( args.NextBuffer + (s * args.BufferSliceStride), args.ThisData + chunkOffset + offset, sliceSize); } else if (ROLE == MIDDLE) { WAIT_FOR_NEW_DATA_AND_CHUNK(chunk, s); Reduce<UNROLL, THREADS, FUNC>( args.NextBuffer + (s * args.BufferSliceStride), args.ThisData + chunkOffset + offset, args.ThisBuffer + (s * args.BufferSliceStride), sliceSize); } else { // ROLE == END WAIT_FOR_NEW_DATA(chunk, s); Reduce<UNROLL, 
THREADS, FUNC>( args.Output + chunkOffset + offset, args.ThisData + chunkOffset + offset, args.ThisBuffer + (s * args.BufferSliceStride), sliceSize); } __syncthreads(); } } else { // Consumer thread for(int s=0; s<NUM_SUBCHUNKS; ++s) { __syncthreads(); if (ROLE != END) SIGNAL_NEW_DATA_AVAILABLE(chunk, s); // signal chunk done if we don't push into the receive buffer and this // is no the last chunk and this is not root if ((ROLE != BEGIN) && (chunk + 1 < args.NumChunks)) { SIGNAL_CHUNK_DONE(chunk, s); } } } } // reset flags if (tid == 0) { args.ThisNewDataAvailableFlag[0] = 0; args.ThisChunkDoneFlag[0] = 0; *args.ThisPtrToNextData = nullptr; } } template<class FUNC, typename T> ncclResult_t ncclReduceWithTypeAndFunc(const void* sendbuff, void* recvbuff, const int count, const int root, ncclComm* comm, hipStream_t stream) { if (count == 0) return ncclSuccess; int index = comm->ncclId; const int numUnroll = 4; int rootId = comm->ringFromUser[root]; int nextId = (index + 1) % comm->nDev; int prevId = (index + comm->nDev - 1) % comm->nDev; // There is one slice per GPU, so a slice can be at most bufferN / numGPUs, // where bufferN is the number of elements of type T that fit into the buffer. // For efficiency, we want the slice size to be a multiple of UNROLL_SIZE int bufferN = comm->buffSize / sizeof(T); // we only need buffer for k slices and k paddings int bufferNPerSlice = bufferN / NUM_SUBCHUNKS; int maxSliceSize = (bufferNPerSlice / UNROLL_SIZE) * UNROLL_SIZE; ReduceKernelArgs<T> args; args.ThisId = index; args.N = count; args.SliceSize = numUnroll * UNROLL_SIZE * sizeof(PackType) / sizeof(T); if(!comm->useRemoteRecv) { // Proxy for QPI. Reduce never pushes directly to recv. // But larger transfers help QPI more than tag updates hurt P2P. 
args.SliceSize *= 8; } // make sure slice fits into the temporary buffer args.SliceSize = ::min(maxSliceSize, args.SliceSize); args.BufferSliceStride = args.SliceSize; args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize; // avoid a case where we have one or more big chunks and one tiny one int remainder = args.N % args.ChunkSize; if ((args.N > args.ChunkSize) && (remainder > 0) && (args.N < 5 * args.ChunkSize) && (2 * remainder < args.ChunkSize)) { args.SliceSize /= 2; args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize; // round down so we end up with a big last chunk args.NumChunks = args.N / args.ChunkSize; } else { // round up args.NumChunks = (args.N + args.ChunkSize - 1) / args.ChunkSize; } args.ThisPtrToNextData = (T**)&(comm->local[nextId]->recvPtrs[0]); args.PrevPtrToThisData = (T**)&(comm->remote[prevId]->recvPtrs[0]); args.Output = (T*)recvbuff; args.ThisData = (const T*) sendbuff; args.ThisBuffer = (volatile T*)comm->local[prevId]->buff; args.NextBuffer = (volatile T*)comm->remote[nextId]->buff; args.ThisNewDataAvailableFlag = comm->local[prevId]->flags; args.NextNewDataAvailableFlag = comm->remote[nextId]->flags; args.ThisChunkDoneFlag = comm->local[nextId]->flags + 1; args.PrevChunkDoneFlag = comm->remote[prevId]->flags + 1; if (index == (rootId + 1) % comm->nDev) { hipLaunchKernelGGL(( ReduceKernel<NUM_THREADS, UNROLL_COUNT, FUNC, BEGIN, T>) , dim3(1), dim3(NUM_THREADS + 1), 0, stream, args); } else if (index == rootId) { hipLaunchKernelGGL(( ReduceKernel<NUM_THREADS, UNROLL_COUNT, FUNC, END, T>) , dim3(1), dim3(NUM_THREADS + 1), 0, stream, args); } else { hipLaunchKernelGGL(( ReduceKernel<NUM_THREADS, UNROLL_COUNT, FUNC, MIDDLE, T>) , dim3(1), dim3(NUM_THREADS + 1), 0, stream, args); } return ncclSuccess; } template <typename T> ncclResult_t ncclReduceWithType(const void* sendbuff, void* recvbuff, int count, ncclRedOp_t op, int root, ncclComm* comm, hipStream_t stream) { switch (op) { case ncclSum: return ncclReduceWithTypeAndFunc<FuncSum<T>, T>( sendbuff, 
recvbuff, count, root, comm, stream); case ncclProd: return ncclReduceWithTypeAndFunc<FuncProd<T>, T>( sendbuff, recvbuff, count, root, comm, stream); case ncclMax: return ncclReduceWithTypeAndFunc<FuncMax<T>, T>( sendbuff, recvbuff, count, root, comm, stream); case ncclMin: return ncclReduceWithTypeAndFunc<FuncMin<T>, T>( sendbuff, recvbuff, count, root, comm, stream); } return ncclInvalidOperation; } class ReduceFunctor { public: ncclResult_t operator()(const void* sendbuff, void* recvbuff, int count, ncclDataType_t datatype, ncclRedOp_t op, int root, ncclComm* comm, hipStream_t stream) { switch (datatype) { case ncclChar: return ncclReduceWithType<char>(sendbuff, recvbuff, count, op, root, comm, stream); case ncclInt: return ncclReduceWithType<int>(sendbuff, recvbuff, count, op, root, comm, stream); #ifdef CUDA_HAS_HALF case ncclHalf: return ncclReduceWithType<half>(sendbuff, recvbuff, count, op, root, comm, stream); #endif case ncclFloat: return ncclReduceWithType<float>(sendbuff, recvbuff, count, op, root, comm, stream); case ncclDouble: return ncclReduceWithType<double>(sendbuff, recvbuff, count, op, root, comm, stream); } return ncclInvalidType; } }; extern "C" DSOGLOBAL ncclResult_t ncclReduce(const void* sendbuff, void* recvbuff, int count, ncclDataType_t datatype, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) { return enqueue(ReduceFunctor(), sendbuff, recvbuff, count, datatype, op, root, comm, stream); }
50656a70d6d247cf45aff345a0d2c46fc608806c.cu
/************************************************************************* * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include <algorithm> #include <nvToolsExt.h> #include "core.h" #include "common_kernel.h" #include "copy_kernel.h" #include "enqueue.h" #include "reduce_kernel.h" /* HIERARCHY * * The data is split into CHUNKS, and each CHUNK is split into NUM_SUBCHUNKS * SUBCHUNKS, where each SUBCHUNK is processed independently. 
A SUBCHUNK is * split into numUnroll UNROLLS and each thread performs UNROLL_COUNT * single-data-element operations inside an UNROLL. As the name suggests, the * UNROLL_COUNT operations within an UNROLL are unrolled. */ // Number of threads used to perform copies, etc. Must be multiple of 32. // An additional thread is used to handle threadfences, so the CUDA blocks // have dimension NUM_THREADS+1. #define NUM_THREADS 256 // Each thread unrolls the innermost loop of the copy or reduction operations // to this many single-data-element instructions #define UNROLL_COUNT 8 #define UNROLL_SIZE (UNROLL_COUNT * NUM_THREADS) // To hide the latency associated with the synchronization between different // subchunks, we interleave the independent subchunks so that more data can be // transferred while the sync is in progress. This is the number of subchunks // that are active at the same time #define NUM_SUBCHUNKS 4 // if this is called with CHUNK, it means that we just finished pushing the data // of chunk CHUNK to the next GPU, so it can proceed with CHUNK // We add 1 to chunk so that the initial flag of 0 doesn't allow the non-root // GPUs to proceed before the flag is incremented from the upstream GPU. This // is called by one particular consumer warp and so we select the first thread // in the warp to set the flag. 
#define SIGNAL_NEW_DATA_AVAILABLE(chunk, subchunk) \ do { \ __threadfence_system(); \ args.NextNewDataAvailableFlag[0] = NUM_SUBCHUNKS*(chunk) + subchunk + 1; \ } while (0) // This is called by all producer threads, but only thread 0 spins on the flag, #define WAIT_FOR_NEW_DATA(chunk, subchunk) \ do { \ if (tid == 0) { \ Wait([=] { \ return ((volatile int *)args.ThisNewDataAvailableFlag)[0] >= \ NUM_SUBCHUNKS*(chunk) + subchunk + 1; \ }); \ } \ BAR(sync, 1, NUM_THREADS); \ } while (0) // If this is called with CHUNK, it means that this GPU has just finished // processing the chunk CHUNK and so the previous GPU can start with CHUNK + 1 #define SIGNAL_CHUNK_DONE(chunk, subchunk) \ do { \ args.PrevChunkDoneFlag[0] = NUM_SUBCHUNKS*(chunk) + subchunk + 1; \ } while (0) // This is called by all producer threads, but only thread 0 spins on the flag, // all threads synchronize after thread 0 is done spinning. #define WAIT_FOR_CHUNK(chunk, subchunk) \ do { \ if (tid == 0) { \ Wait([=] { \ return ((volatile int *)args.ThisChunkDoneFlag)[0] >= \ NUM_SUBCHUNKS*(chunk) + subchunk + 1 - NUM_SUBCHUNKS; \ }); \ } \ BAR(sync, 1, NUM_THREADS); \ } while (0) // This is called by all producer threads, but only thread 0 spins on the flag, // all threads synchronize after thread 0 is done spinning. 
#define WAIT_FOR_NEW_DATA_AND_CHUNK(chunk, subchunk) \ do { \ if (tid == 0) { \ Wait([=] { \ bool newDataAvailable = \ ((volatile int *)args.ThisNewDataAvailableFlag)[0] >= \ NUM_SUBCHUNKS*(chunk) + subchunk + 1; \ bool chunkDone = \ ((volatile int *)args.ThisChunkDoneFlag)[0] >= \ NUM_SUBCHUNKS*(chunk)+subchunk + 1 - NUM_SUBCHUNKS; \ return newDataAvailable && chunkDone; \ }); \ } \ BAR(sync, 1, NUM_THREADS); \ } while (0) __device__ inline void getSliceSizeAndOffset(int *size, int *offset, int slice, int numSlices, int numBigSlices, int numSmallSlices, int bigSliceN, int smallSliceN, int lastSliceN) { if (slice < numBigSlices) { *size = bigSliceN; *offset = slice * bigSliceN; } else { *size = (slice < numBigSlices + numSmallSlices) ? smallSliceN : ((slice == numSlices - 1) ? lastSliceN : 0); *offset = numBigSlices * bigSliceN + (slice - numBigSlices) * smallSliceN; } // if (threadIdx.x == 0) // printf("[size=%d] [offset=%d] slice=%d numSlices=%d " // "numBigSlices=%d numSmallSlices=%d bigSliceN=%d smallSliceN=%d " // "lastSliceN=%d\n", *size, *offset, slice, numSlices, numBigSlices, // numSmallSlices, bigSliceN, smallSliceN, lastSliceN); } template<typename T> struct ReduceKernelArgs { // general parameters int ThisId; int N; // some pre-computed sizes int SliceSize; int ChunkSize; int NumChunks; int BufferSliceStride; T ** ThisPtrToNextData; T ** PrevPtrToThisData; // local and remote data T * __restrict__ Output; const T * __restrict__ ThisData; volatile T * __restrict__ ThisBuffer; volatile T * __restrict__ NextBuffer; // local and remote flags volatile int * __restrict__ ThisNewDataAvailableFlag; volatile int * __restrict__ NextNewDataAvailableFlag; volatile int * __restrict__ ThisChunkDoneFlag; volatile int * __restrict__ PrevChunkDoneFlag; }; __shared__ volatile void * nextData; enum ReduceRole {BEGIN=0, MIDDLE=1, END=2}; template<int THREADS, int UNROLL, class FUNC, int ROLE, typename T> __global__ void ReduceKernel(const ReduceKernelArgs<T> args) { if 
(args.N == 0) return; int tid = threadIdx.x; // First wait for args.PrevPtrToThisOutput to become nullptr to ensure that // the previous GPU is done with a previous collective operation. if (tid == 0) { Wait([=] { return *((T * volatile *)args.PrevPtrToThisData) == nullptr; // Wait for previous processor to be done }); *((T * volatile *)args.PrevPtrToThisData) = (T*)args.ThisData; // Tell Previous I'm starting Wait([=] { return *((T * volatile *)args.ThisPtrToNextData) != nullptr; // Wait till I've been told next started }); } __syncthreads(); for (int chunk = 0; chunk < args.NumChunks; ++chunk) { // calculate slice size. for all chunks except (possibly) the last one, // this will just be args.SliceSize. For the last one, it may be smaller int bigSliceN = args.SliceSize; int smallSliceN = 0; int lastSliceN = 0; int numSlices = NUM_SUBCHUNKS; int numBigSlices = numSlices; int numSmallSlices = 0; // last chunk if ((chunk + 1 == args.NumChunks) && (args.N % args.ChunkSize > 0)) CalcLastChunk<THREADS, UNROLL, T>(&bigSliceN, &smallSliceN, &lastSliceN, &numSlices, &numBigSlices, &numSmallSlices, args.N, args.NumChunks, args.ChunkSize); // this offset is only applied to Data pointers, not to Buffer pointers, // since we only have one buffer per chunk int chunkOffset = chunk * args.ChunkSize; int offset; int sliceSize; if (tid < THREADS) { for(int s=0; s<NUM_SUBCHUNKS; ++s) { getSliceSizeAndOffset(&sliceSize, &offset, s, numSlices, numBigSlices, numSmallSlices, bigSliceN, smallSliceN, lastSliceN); if (ROLE == BEGIN) { WAIT_FOR_CHUNK(chunk, s); Copy<UNROLL, THREADS>( args.NextBuffer + (s * args.BufferSliceStride), args.ThisData + chunkOffset + offset, sliceSize); } else if (ROLE == MIDDLE) { WAIT_FOR_NEW_DATA_AND_CHUNK(chunk, s); Reduce<UNROLL, THREADS, FUNC>( args.NextBuffer + (s * args.BufferSliceStride), args.ThisData + chunkOffset + offset, args.ThisBuffer + (s * args.BufferSliceStride), sliceSize); } else { // ROLE == END WAIT_FOR_NEW_DATA(chunk, s); Reduce<UNROLL, 
THREADS, FUNC>( args.Output + chunkOffset + offset, args.ThisData + chunkOffset + offset, args.ThisBuffer + (s * args.BufferSliceStride), sliceSize); } __syncthreads(); } } else { // Consumer thread for(int s=0; s<NUM_SUBCHUNKS; ++s) { __syncthreads(); if (ROLE != END) SIGNAL_NEW_DATA_AVAILABLE(chunk, s); // signal chunk done if we don't push into the receive buffer and this // is no the last chunk and this is not root if ((ROLE != BEGIN) && (chunk + 1 < args.NumChunks)) { SIGNAL_CHUNK_DONE(chunk, s); } } } } // reset flags if (tid == 0) { args.ThisNewDataAvailableFlag[0] = 0; args.ThisChunkDoneFlag[0] = 0; *args.ThisPtrToNextData = nullptr; } } template<class FUNC, typename T> ncclResult_t ncclReduceWithTypeAndFunc(const void* sendbuff, void* recvbuff, const int count, const int root, ncclComm* comm, cudaStream_t stream) { if (count == 0) return ncclSuccess; int index = comm->ncclId; const int numUnroll = 4; int rootId = comm->ringFromUser[root]; int nextId = (index + 1) % comm->nDev; int prevId = (index + comm->nDev - 1) % comm->nDev; // There is one slice per GPU, so a slice can be at most bufferN / numGPUs, // where bufferN is the number of elements of type T that fit into the buffer. // For efficiency, we want the slice size to be a multiple of UNROLL_SIZE int bufferN = comm->buffSize / sizeof(T); // we only need buffer for k slices and k paddings int bufferNPerSlice = bufferN / NUM_SUBCHUNKS; int maxSliceSize = (bufferNPerSlice / UNROLL_SIZE) * UNROLL_SIZE; ReduceKernelArgs<T> args; args.ThisId = index; args.N = count; args.SliceSize = numUnroll * UNROLL_SIZE * sizeof(PackType) / sizeof(T); if(!comm->useRemoteRecv) { // Proxy for QPI. Reduce never pushes directly to recv. // But larger transfers help QPI more than tag updates hurt P2P. 
args.SliceSize *= 8; } // make sure slice fits into the temporary buffer args.SliceSize = std::min(maxSliceSize, args.SliceSize); args.BufferSliceStride = args.SliceSize; args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize; // avoid a case where we have one or more big chunks and one tiny one int remainder = args.N % args.ChunkSize; if ((args.N > args.ChunkSize) && (remainder > 0) && (args.N < 5 * args.ChunkSize) && (2 * remainder < args.ChunkSize)) { args.SliceSize /= 2; args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize; // round down so we end up with a big last chunk args.NumChunks = args.N / args.ChunkSize; } else { // round up args.NumChunks = (args.N + args.ChunkSize - 1) / args.ChunkSize; } args.ThisPtrToNextData = (T**)&(comm->local[nextId]->recvPtrs[0]); args.PrevPtrToThisData = (T**)&(comm->remote[prevId]->recvPtrs[0]); args.Output = (T*)recvbuff; args.ThisData = (const T*) sendbuff; args.ThisBuffer = (volatile T*)comm->local[prevId]->buff; args.NextBuffer = (volatile T*)comm->remote[nextId]->buff; args.ThisNewDataAvailableFlag = comm->local[prevId]->flags; args.NextNewDataAvailableFlag = comm->remote[nextId]->flags; args.ThisChunkDoneFlag = comm->local[nextId]->flags + 1; args.PrevChunkDoneFlag = comm->remote[prevId]->flags + 1; if (index == (rootId + 1) % comm->nDev) { ReduceKernel<NUM_THREADS, UNROLL_COUNT, FUNC, BEGIN, T> <<<1, NUM_THREADS + 1, 0, stream>>>(args); } else if (index == rootId) { ReduceKernel<NUM_THREADS, UNROLL_COUNT, FUNC, END, T> <<<1, NUM_THREADS + 1, 0, stream>>>(args); } else { ReduceKernel<NUM_THREADS, UNROLL_COUNT, FUNC, MIDDLE, T> <<<1, NUM_THREADS + 1, 0, stream>>>(args); } return ncclSuccess; } template <typename T> ncclResult_t ncclReduceWithType(const void* sendbuff, void* recvbuff, int count, ncclRedOp_t op, int root, ncclComm* comm, cudaStream_t stream) { switch (op) { case ncclSum: return ncclReduceWithTypeAndFunc<FuncSum<T>, T>( sendbuff, recvbuff, count, root, comm, stream); case ncclProd: return 
ncclReduceWithTypeAndFunc<FuncProd<T>, T>( sendbuff, recvbuff, count, root, comm, stream); case ncclMax: return ncclReduceWithTypeAndFunc<FuncMax<T>, T>( sendbuff, recvbuff, count, root, comm, stream); case ncclMin: return ncclReduceWithTypeAndFunc<FuncMin<T>, T>( sendbuff, recvbuff, count, root, comm, stream); } return ncclInvalidOperation; } class ReduceFunctor { public: ncclResult_t operator()(const void* sendbuff, void* recvbuff, int count, ncclDataType_t datatype, ncclRedOp_t op, int root, ncclComm* comm, cudaStream_t stream) { switch (datatype) { case ncclChar: return ncclReduceWithType<char>(sendbuff, recvbuff, count, op, root, comm, stream); case ncclInt: return ncclReduceWithType<int>(sendbuff, recvbuff, count, op, root, comm, stream); #ifdef CUDA_HAS_HALF case ncclHalf: return ncclReduceWithType<half>(sendbuff, recvbuff, count, op, root, comm, stream); #endif case ncclFloat: return ncclReduceWithType<float>(sendbuff, recvbuff, count, op, root, comm, stream); case ncclDouble: return ncclReduceWithType<double>(sendbuff, recvbuff, count, op, root, comm, stream); } return ncclInvalidType; } }; extern "C" DSOGLOBAL ncclResult_t ncclReduce(const void* sendbuff, void* recvbuff, int count, ncclDataType_t datatype, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) { return enqueue(ReduceFunctor(), sendbuff, recvbuff, count, datatype, op, root, comm, stream); }
953befcdb461d4335971e7c59045784c68e9f5c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <storage.cuh> #include <utils.cuh> #include <hiprand/hiprand_kernel.h> #include <device_launch_parameters.h> #include <cmath> Storage::Storage(const std::vector<int> &_shape) : shape(_shape) { int size = 1; for (int i = 0; i < _shape.size(); i++) { size *= _shape[i]; } this->data.resize(size); } Storage::Storage(const std::vector<int> &_shape, float value) : shape(_shape) { int size = 1; for (int i = 0; i < _shape.size(); i++) { size *= _shape[i]; } this->data.resize(size, value); } Storage::Storage(const std::vector<int> &_shape, const std::vector<float> &_data) : shape(_shape), data(_data.begin(), _data.end()) { this->check_size(); } Storage::Storage(const Storage &other) { *this = other; } Storage &Storage::operator=(const Storage &other) { if (this != &other) { this->shape = other.shape; this->data = other.data; } return *this; } Storage::Storage(Storage &&other) { *this = std::move(other); } Storage &Storage::operator=(Storage &&other) { if (this != &other) { this->shape = std::move(other.shape); this->data = std::move(other.data); } return *this; } void Storage::reshape(const std::vector<int> &_shape) { this->shape = _shape; this->check_size(); } void Storage::resize(const std::vector<int> &_shape) { this->shape = _shape; int size = 1; for (int i = 0; i < _shape.size(); i++) { size *= _shape[i]; } if (size != this->data.size()) { this->data.resize(size); } } __global__ void storage_xavier(float *a, int size, float scale, hiprandState_t *cs) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { hiprand_init(1234, index, 0, &cs[index]); a[index] = (hiprand_uniform(&cs[index]) * 2 - 1) * scale; } } void Storage::xavier(size_t in_size, size_t out_size) { float *a_ptr = RAW_PTR(this->data); int size = this->data.size(); int grid_size = ceil((float)(size) / BLOCK_SIZE); thrust::device_vector<hiprandState_t> cs(size); hiprandState_t *cs_ptr = 
RAW_PTR(cs); float scale = std::sqrt((float)6) / std::sqrt((float)(in_size) + out_size); hipLaunchKernelGGL(( storage_xavier), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, a_ptr, size, scale, cs_ptr); CUDA_POST_KERNEL_CHECK; } void Storage::check_size() { int size = 1; for (int i = 0; i < this->shape.size(); i++) { size *= this->shape[i]; } CHECK_EQ(size, this->data.size(), "Storage: size error"); }
953befcdb461d4335971e7c59045784c68e9f5c6.cu
#include <storage.cuh> #include <utils.cuh> #include <curand_kernel.h> #include <device_launch_parameters.h> #include <cmath> Storage::Storage(const std::vector<int> &_shape) : shape(_shape) { int size = 1; for (int i = 0; i < _shape.size(); i++) { size *= _shape[i]; } this->data.resize(size); } Storage::Storage(const std::vector<int> &_shape, float value) : shape(_shape) { int size = 1; for (int i = 0; i < _shape.size(); i++) { size *= _shape[i]; } this->data.resize(size, value); } Storage::Storage(const std::vector<int> &_shape, const std::vector<float> &_data) : shape(_shape), data(_data.begin(), _data.end()) { this->check_size(); } Storage::Storage(const Storage &other) { *this = other; } Storage &Storage::operator=(const Storage &other) { if (this != &other) { this->shape = other.shape; this->data = other.data; } return *this; } Storage::Storage(Storage &&other) { *this = std::move(other); } Storage &Storage::operator=(Storage &&other) { if (this != &other) { this->shape = std::move(other.shape); this->data = std::move(other.data); } return *this; } void Storage::reshape(const std::vector<int> &_shape) { this->shape = _shape; this->check_size(); } void Storage::resize(const std::vector<int> &_shape) { this->shape = _shape; int size = 1; for (int i = 0; i < _shape.size(); i++) { size *= _shape[i]; } if (size != this->data.size()) { this->data.resize(size); } } __global__ void storage_xavier(float *a, int size, float scale, curandState *cs) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { curand_init(1234, index, 0, &cs[index]); a[index] = (curand_uniform(&cs[index]) * 2 - 1) * scale; } } void Storage::xavier(size_t in_size, size_t out_size) { float *a_ptr = RAW_PTR(this->data); int size = this->data.size(); int grid_size = ceil((float)(size) / BLOCK_SIZE); thrust::device_vector<curandState> cs(size); curandState *cs_ptr = RAW_PTR(cs); float scale = std::sqrt((float)6) / std::sqrt((float)(in_size) + out_size); storage_xavier<<<grid_size, 
BLOCK_SIZE>>>(a_ptr, size, scale, cs_ptr); CUDA_POST_KERNEL_CHECK; } void Storage::check_size() { int size = 1; for (int i = 0; i < this->shape.size(); i++) { size *= this->shape[i]; } CHECK_EQ(size, this->data.size(), "Storage: size error"); }
580315111b8e3ae233a0c993961135abde5c3115.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = 
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = (int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignBackwardFeature(const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * 
pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward at::Tensor ROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); 
auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(hipGetLastError()); return output; } // niu //AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] { AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] { hipLaunchKernelGGL(( RoIAlignForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data<scalar_t>(), output.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return output; } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(hipGetLastError()); return grad_input; } //niu //AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] { AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] { hipLaunchKernelGGL(( RoIAlignBackwardFeature<scalar_t>), 
dim3(grid), dim3(block), 0, stream, grad.numel(), grad.contiguous().data<scalar_t>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return grad_input; }
580315111b8e3ae233a0c993961135abde5c3115.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = 
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = (int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignBackwardFeature(const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * 
pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward at::Tensor ROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); 
auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return output; } // niu //AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] { AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] { RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data<scalar_t>(), output.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return output; } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(cudaGetLastError()); return grad_input; } //niu //AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] { AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] { RoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad.contiguous().data<scalar_t>(), 
num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return grad_input; }
4187d1860c4a00d32d8ffec1a2729127c68ee516.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* **************************************************************************************************/ /* \file \brief Convolution 3D profiling */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "conv3d_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// using namespace cutlass::library; namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor Conv3dOperationProfiler::Conv3dOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kConv3d, { {ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"}, {ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"d", "input_d"}, "Input D dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"t", "filter_t"}, "Filter T dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"z", "output_z"}, "Output Z dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv3d problem space"}, 
{ArgumentTypeID::kInteger, {"pad_d"}, "Padding in D direction"}, {ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"}, {ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"}, {ArgumentTypeID::kInteger, {"stride_d"}, "Stride in D direction"}, {ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"}, {ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"}, {ArgumentTypeID::kInteger, {"dilation_d"}, "Dilation in D direction"}, {ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"}, {ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"}, {ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"}, {ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"}, {ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"}, {ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"}, {ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"}, }, { library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN } ) { description_ = " Conv3d operation. 
Output(Tensor5D) = alpha * Input(Tensor5D) * Filter(Tensor5D) + beta * Input(Tensor5D)"; } /// Destructor Conv3dOperationProfiler::~Conv3dOperationProfiler() { } /// Prints usage statement for the math function void Conv3dOperationProfiler::print_usage(std::ostream &out) const { out << "Conv3d" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void Conv3dOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular convolution (specify all the convolution parameters):\n" << " $ cutlass_profiler --operation=Conv3d" " --Activation=f16:ndhwc --Filter=f16:ndhwc --Output=f16 --accumulator-type=f32" " --n=32 --d=16 --h=14 --w=14 --c=8 --k=64 --t=3 --r=3 --s=3" " --pad_d=1 --pad_h=1 --pad_w=1" " --stride_d=1 --stride::h=1 --stride::w=1" " --dilation_d=1 --dilation::h=1 --dilation::w=1\n\n"; } #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif ///////////////////////////////////////////////////////////////////////////////////////////////// /// Total number of bytes loaded int64_t Conv3dOperationProfiler::Conv3dProblem::bytes(library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); // Input bytes read and Output bytes written for the gemm problem int64_t bytes_ = int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); // Output bytes read for the gemm problem for non-zero beta 
values if (!is_beta_zero) { bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); } return bytes_; } /// Total number of flops computed int64_t Conv3dOperationProfiler::Conv3dProblem::flops( library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2; int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2; // Adjust mainloop flop for dgrad strided if (operation_desc.conv_kind == library::ConvKind::kDgrad) { flops_mainloop_ = flops_mainloop_ / ( stride_d * stride_h * stride_w); } return (flops_mainloop_ + flops_epilogue_); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Extracts the problem dimensions Status Conv3dOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(operation->description()); if (!arg_as_int(problem_.n, "n", problem_space, problem)) { // default value problem_.n = 1; } if (!arg_as_int(problem_.d, "d", problem_space, problem)) { // default value problem_.d = 8; } if (!arg_as_int(problem_.h, "h", problem_space, problem)) { // default value problem_.h = 14; } if (!arg_as_int(problem_.w, "w", problem_space, problem)) { // default value problem_.w = 14; } if (!arg_as_int(problem_.c, "c", problem_space, problem)) { // default value problem_.c = 32; } if (!arg_as_int(problem_.k, "k", problem_space, problem)) { // default value problem_.k = 32; } if (!arg_as_int(problem_.t, "t", problem_space, problem)) { // default value problem_.t = 3; } if (!arg_as_int(problem_.r, "r", problem_space, problem)) { // default value problem_.r = 3; } if 
(!arg_as_int(problem_.s, "s", problem_space, problem)) { // default value problem_.s = 3; } if (!arg_as_int(problem_.pad_d, "pad_d", problem_space, problem)) { // default value problem_.pad_d = 1; } if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) { // default value problem_.pad_w = 1; } if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) { // default value problem_.pad_h = 1; } if (!arg_as_int(problem_.stride_d, "stride_d", problem_space, problem)) { // default value problem_.stride_d = 1; } if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) { // default value problem_.stride_h = 1; } if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) { // default value problem_.stride_w = 1; } if (!arg_as_int(problem_.dilation_d, "dilation_d", problem_space, problem)) { // default value problem_.dilation_d = 1; } if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) { // default value problem_.dilation_h = 1; } if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) { // default value problem_.dilation_w = 1; } //////////////////////// Convolution output dimensions p and q //////////////////////// // Cutlass convolutions support arbitrary output sizes and not constrained by // // input, filter, padding, striding, dilation sizes. // // cuDNN sets the output dimensions (p, q) using following equations: // // // // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) // // where; div_up(a, b) : (a - 1)/b + 1 // // // // Thus, when output p and q dimensions are unspecified by the user // // cutlass profiler sets p and q which are cuDNN compliant. 
// // // //////////////////////////////////////////////////////////////////////////////////////// // set convolution output z if (!arg_as_int(problem_.z, "z", problem_space, problem)) { // default value (set using cudnn formula for output height, when p is not provided) problem_.z = ( problem_.d + 2 * problem_.pad_d - ((problem_.t - 1) * problem_.dilation_d + 1) ) / (problem_.stride_d) + 1; } // set convolution output p if (!arg_as_int(problem_.p, "p", problem_space, problem)) { // default value (set using cudnn formula for output height, when p is not provided) problem_.p = ( problem_.h + 2 * problem_.pad_h - ((problem_.r - 1) * problem_.dilation_h + 1) ) / (problem_.stride_h) + 1; } // set convolution output q if (!arg_as_int(problem_.q, "q", problem_space, problem)) { // default value (set using cudnn formula for output width, when q is not provided) problem_.q = ( problem_.w + 2 * problem_.pad_w - ((problem_.s - 1) * problem_.dilation_w + 1) ) / (problem_.stride_w) + 1; } ///////////////////////////////////////////////////////////////////////////////////////// if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) { // default value problem_.split_k_mode = library::SplitKMode::kSerial; } if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) { // default value problem_.split_k_slices = 1; } if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) { // default value problem_.conv_mode = library::ConvModeID::kCrossCorrelation; } if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) { // default value problem_.eq_gemm_provider = library::Provider::kNone; } if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) { return Status::kErrorInvalidProblem; } 
if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( problem_.alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( problem_.beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } // initialize library::ConvConfiguration conv_workspace_.configuration.problem_size = conv::Conv3dProblemSize( int(problem_.n), int(problem_.d), int(problem_.h), int(problem_.w), int(problem_.c), int(problem_.k), int(problem_.t), int(problem_.r), int(problem_.s), int(problem_.z), int(problem_.p), int(problem_.q), int(problem_.pad_d), int(problem_.pad_h), int(problem_.pad_w), int(problem_.stride_d), int(problem_.stride_h), int(problem_.stride_w), int(problem_.dilation_d), int(problem_.dilation_h), int(problem_.dilation_w), static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)), int(problem_.split_k_slices), 1 // groups ); conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode)); conv_workspace_.configuration.layout_activations.stride() = make_Coord( int(problem_.c), int(problem_.w) * int(problem_.c), int(problem_.h) * int(problem_.w) * int(problem_.c), int(problem_.d) * int(problem_.h) * int(problem_.w) * int(problem_.c) ); conv_workspace_.configuration.layout_filters.stride() = make_Coord( int(problem_.c), int(problem_.s) * int(problem_.c), int(problem_.r) * 
int(problem_.s) * int(problem_.c), int(problem_.t) * int(problem_.r) * int(problem_.s) * int(problem_.c) ); conv_workspace_.configuration.layout_output.stride() = make_Coord( int(problem_.k), int(problem_.q) * int(problem_.k), int(problem_.q) * int(problem_.p) * int(problem_.k), int(problem_.z) * int(problem_.q) * int(problem_.p) * int(problem_.k) ); // initialize library::ConvArguments conv_workspace_.arguments.A = nullptr; conv_workspace_.arguments.B = nullptr; conv_workspace_.arguments.C = nullptr; conv_workspace_.arguments.D = nullptr; conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // initialize reduction operation for parallel splitKMode not supported for conv3d if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) { return Status::kErrorInternal; } } initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments); } /// Initializes the performance result void Conv3dOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::ConvDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; result.arguments.resize(problem_space.rank()); set_argument(result, "Activation", problem_space, std::string(library::to_string(operation_desc.activation().element)) + ":" + library::to_string(operation_desc.activation().layout)); set_argument(result, "Filter", problem_space, std::string(library::to_string(operation_desc.filter().element)) + ":" + 
library::to_string(operation_desc.filter().layout)); set_argument(result, "Output", problem_space, std::string(library::to_string(operation_desc.output().element)) + ":" + library::to_string(operation_desc.output().layout)); set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind)); set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm))); set_argument(result, "n", problem_space, problem_.n); set_argument(result, "d", problem_space, problem_.d); set_argument(result, "h", problem_space, problem_.h); set_argument(result, "w", problem_space, problem_.w); set_argument(result, "c", problem_space, problem_.c); set_argument(result, "k", problem_space, problem_.k); set_argument(result, "t", problem_space, problem_.t); set_argument(result, "r", problem_space, problem_.r); set_argument(result, "s", problem_space, problem_.s); set_argument(result, "z", problem_space, problem_.z); set_argument(result, "p", problem_space, problem_.p); set_argument(result, "q", problem_space, problem_.q); set_argument(result, "pad_d", problem_space, problem_.pad_d); set_argument(result, "pad_h", problem_space, problem_.pad_h); set_argument(result, "pad_w", problem_space, problem_.pad_w); set_argument(result, "stride_d", problem_space, problem_.stride_d); set_argument(result, "stride_h", problem_space, problem_.stride_h); set_argument(result, "stride_w", problem_space, problem_.stride_w); set_argument(result, "dilation_d", problem_space, problem_.dilation_d); set_argument(result, "dilation_h", problem_space, problem_.dilation_h); set_argument(result, "dilation_w", problem_space, problem_.dilation_w); set_argument(result, "split_k_mode", problem_space, std::string(library::to_string(problem_.split_k_mode))); set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices); set_argument(result, "conv_mode", problem_space, std::string(library::to_string(problem_.conv_mode))); 
set_argument(result, "alpha", problem_space, library::lexical_cast(problem_.alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(problem_.beta, operation_desc.element_epilogue)); set_argument(result, "eq_gemm_provider", problem_space, std::string(library::to_string(problem_.eq_gemm_provider))); OperationProfiler::initialize_result_(result, operation_desc, problem_space); // Bytes of activation, filter, and output tensors result.bytes = problem_.bytes(operation_desc); // Theoretical flops required for the computation result.flops = problem_.flops(operation_desc); // Measured runtime result.runtime = 0; } /// Initialize reduction problem dimensions and library::Operation bool Conv3dOperationProfiler::initialize_reduction_configuration_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); library::ConvKind const &conv_kind = conv_desc.conv_kind; if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) { return false; } if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) { return false; } /// This chooses the appropriate stride element of the row-major C tensor. int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 
3 : 0); /// initialize library::ReductionConfiguration conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn(); conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices); conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product(); conv_workspace_.reduction_configuration.ldw = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx]; conv_workspace_.reduction_configuration.lds = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx]; conv_workspace_.reduction_configuration.ldd = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx]; // find reduction operation library::ReductionFunctionalKey reduction_key( library::Provider::kCUTLASS, conv_desc.tile_description.math_instruction.element_accumulator, // element workspace conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator conv_desc.C.element, // element output conv_desc.element_epilogue // element compute ); #if 0// debug print to check which reduction instance is selected std::cout << reduction_key << "\n"; #endif auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key); if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) { return false; } // initialize reduction operation required for parallel split-k conv2d operator reduction_op_ = reduction_it->second; // reduction operation found and initialized return true; } /// Initializes workspace Status Conv3dOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; 
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { return Status::kErrorNotSupported; } } library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(underlying_operation->description()); // Compute the number of copies of the problem to avoid L2 camping. if (!options.profiling.workspace_count) { int64_t bytes = problem_.bytes(operation_desc); if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) { conv_workspace_.problem_count = 1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes); } else { conv_workspace_.problem_count = 1; } } else { conv_workspace_.problem_count = options.profiling.workspace_count; } if (options.execution_mode != ExecutionMode::kDryRun) { conv_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, problem_.extent_a(operation_desc.conv_kind), conv_workspace_.stride_a(operation_desc.conv_kind), conv_workspace_.problem_count ); conv_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, problem_.extent_b(operation_desc.conv_kind), conv_workspace_.stride_b(operation_desc.conv_kind), conv_workspace_.problem_count ); conv_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.stride_c(operation_desc.conv_kind), conv_workspace_.problem_count ); conv_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.stride_c(operation_desc.conv_kind), conv_workspace_.problem_count ); conv_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.C.element, operation_desc.C.layout, 
problem_.extent_c(operation_desc.conv_kind), conv_workspace_.stride_c(operation_desc.conv_kind), conv_workspace_.problem_count ); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration); conv_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration); conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = underlying_operation->initialize( &conv_workspace_.configuration, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data()); if (status != Status::kSuccess) { return status; } if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration); conv_workspace_.reduction_host_workspace.resize(workspace_size, 0); status = reduction_op_->initialize( &conv_workspace_.reduction_configuration, conv_workspace_.reduction_host_workspace.data(), nullptr); if (status != Status::kSuccess) { return status; } } } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kConv3d; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool Conv3dOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, 
library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } hipError_t result; // Initialize structure containing Conv arguments set_cutlass_operator_arguments_(); conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data()); // // Run the CUTLASS operation // // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { results_.back().disposition = Disposition::kFailed; return false; } } #if 0 std::cout << "profiling : " << std::endl << "conv2d : " << operation->description().name << std::endl << "underlying conv2d : " << underlying_operation->description().name << std::endl << "reduction : " << reduction_op_->description().name << std::endl; #endif // run cutlass conv2d operation results_.back().status = underlying_operation->run( &conv_workspace_.arguments, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { results_.back().status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } } // Synchronize before running device reference result = hipDeviceSynchronize(); if (result != hipSuccess) { results_.back().disposition = Disposition::kFailed; 
return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUDNN // Run verification cudnn reference if (options.verification.provider_enabled(library::Provider::kCUDNN)) { // Guard against unsupported cases auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description()); Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration); // Initialize reference data to the source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); if (status == Status::kSuccess) { // call cudnn verification if supported verify_with_cudnn_( options, report, device_context, operation, problem_space, problem); } else if (status == Status::kErrorInvalidProblem) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem; } else { // set verification map for cudnn to not supported results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUDNN // Run verification host reference if (options.verification.provider_enabled(library::Provider::kReferenceHost)) { // Restore reference data back to initial source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); verify_with_host_reference_( options, report, device_context, operation, problem_space, problem); } // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } 
if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling return true; } /// Verifies CUTLASS against host reference bool Conv3dOperationProfiler::verify_with_host_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { Status status; // // Find host reference operation using conv functional description key // library::OperationDescription const &desc = operation->description(); auto &conv_desc = static_cast<library::ConvDescription const &>(desc); library::ConvFunctionalKey conv_key( library::Provider::kReferenceHost, conv_desc.conv_kind, conv_desc.A.element, conv_desc.A.layout, conv_desc.B.element, conv_desc.B.layout, conv_desc.C.element, conv_desc.C.layout, conv_desc.tile_description.math_instruction.element_accumulator, conv_desc.element_epilogue); #if 0 // debug print to check which host reference instance is selected std::cout << conv_key << "\n"; #endif auto operators_it = Singleton::get().operation_table.conv3d_operations.find(conv_key); if(operators_it == Singleton::get().operation_table.conv3d_operations.end()) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; return true; } // conv3d host reference minimum cc is 0 (CPU) and no iterator algorithm library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone); auto cc_it = operators_it->second.find(preference_key); if(cc_it == operators_it->second.end()) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; return true; } // host reference has only one instances in ConvOperationVectorMap library::Operation const *reference_op = cc_it->second[0]; // // Copy input tensors A, B, and C from device to host buffers // conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes()); 
conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes()); conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes()); conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data()); conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data()); conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data()); // // Initialize structure containing Conv3d arguments // conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data(); conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data(); conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data(); conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Initialize host reference operation // std::vector<uint8_t> host_workspace_reference_op; uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); host_workspace_reference_op.resize(workspace_size, 0); reference_op->initialize( &conv_workspace_.configuration, host_workspace_reference_op.data()); // // Run host reference operation // status = reference_op->run( &conv_workspace_.arguments, host_workspace_reference_op.data()); // Handle errors if (status != Status::kSuccess) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified; return true; } // // Copy host reference output to device memory for equality check on device // conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D); // // Verify results // results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && 
results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) { save_workspace( device_context, options, static_cast<library::ConvDescription const &>(operation->description()), library::Provider::kCUTLASS, library::Provider::kReferenceHost); } // Return true means continue profiling return true; } /// Verifies CUTLASS against host reference bool Conv3dOperationProfiler::verify_with_device_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { // TODO: verify cutlass conv3d against device reference // Return true means continue profiling return true; } /// Measures performance results bool Conv3dOperationProfiler::profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { set_cutlass_operator_arguments_(); results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &conv_workspace_.arguments, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data() ); } return true; } /// Updates the arguments structure for the CUTLASS operator based on /// the problem index. 
void Conv3dOperationProfiler::set_cutlass_operator_arguments_(int problem_idx) { // Initialize structure containing Conv3d arguments conv_workspace_.arguments.A = conv_workspace_.A->batch_data(problem_idx); conv_workspace_.arguments.B = conv_workspace_.B->batch_data(problem_idx); conv_workspace_.arguments.C = conv_workspace_.C->batch_data(problem_idx); conv_workspace_.arguments.D = conv_workspace_.Computed->batch_data(problem_idx); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); conv_workspace_.arguments.alpha = problem_.alpha_one.data(); conv_workspace_.arguments.beta = problem_.beta_zero.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); conv_workspace_.reduction_arguments.beta = problem_.beta.data(); conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; } } /// Method to profile a CUTLASS Operation Status Conv3dOperationProfiler::profile_cutlass_( double &runtime, Options const &options, library::Operation const *operation, void *arguments, void *host_workspace, void *device_workspace) { GpuTimer timer; // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = 
library::find_conv_operation_for_parallel_reduction(operation))) { return Status::kErrorNotSupported; } } // // Optional sleep to limit power consumption and thermals // sleep(options.profiling.sleep_duration); // // Warmup loop // Status status; for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { // Setup rotating workspace int workspace_idx = options.profiling.warmup_iterations + iteration; int problem_idx = (workspace_idx % conv_workspace_.problem_count); set_cutlass_operator_arguments_(problem_idx); // Run underlying conv2d operation status = underlying_operation->run( arguments, host_workspace, device_workspace); // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); } if (status != Status::kSuccess) { return status; } } // // Initialize GPU timer // timer.start(); // // Profiling loop // int Iterations = options.profiling.iterations; int iteration = 0; for (; iteration < Iterations; ++iteration) { // Setup rotating workspace int problem_idx = (iteration % conv_workspace_.problem_count); set_cutlass_operator_arguments_(problem_idx); // Run underlying conv2d operation status = underlying_operation->run( arguments, host_workspace, device_workspace); // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); } if (status != Status::kSuccess) { return status; } } // // Wait for completion // timer.stop_and_wait(); // // Update performance result // runtime = timer.duration(iteration); return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// #if 
CUTLASS_ENABLE_CUDNN /// Verifies CUTLASS against cudnn reference bool Conv3dOperationProfiler::verify_with_cudnn_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); // // Construct cudnn operators // CudnnCreate handle; cudnnStatus_t status = handle.get_cudnn_create_status(); if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Initialize state // // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.D = conv_workspace_.Reference->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // cuDNN does not support four tensor arguments, so we copy the tensor C data into // tensor D. 
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); conv_workspace_.arguments.C = conv_workspace_.arguments.D; try { // // Construct dispatcher to cudnn operator // detail::cudnnConvDispatcher conv_op( conv_desc, conv_workspace_.configuration, conv_workspace_.arguments, handle ); if (conv_op.status != Status::kSuccess) { if (conv_op.status == Status::kErrorNotSupported) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } else { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } return true; } status = conv_op(handle); // Handle errors if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Verify results // results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) { save_workspace( device_context, options, conv_desc, library::Provider::kCUTLASS, library::Provider::kCUDNN); } } catch (...) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } // Return true means continue profiling return true; } #endif // #if CUTLASS_ENABLE_CUDNN ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
4187d1860c4a00d32d8ffec1a2729127c68ee516.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* **************************************************************************************************/ /* \file \brief Convolution 3D profiling */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "conv3d_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// using namespace cutlass::library; namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor Conv3dOperationProfiler::Conv3dOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kConv3d, { {ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"}, {ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"d", "input_d"}, "Input D dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"t", "filter_t"}, "Filter T dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"z", "output_z"}, "Output Z dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv3d problem space"}, {ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv3d problem space"}, 
{ArgumentTypeID::kInteger, {"pad_d"}, "Padding in D direction"}, {ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"}, {ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"}, {ArgumentTypeID::kInteger, {"stride_d"}, "Stride in D direction"}, {ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"}, {ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"}, {ArgumentTypeID::kInteger, {"dilation_d"}, "Dilation in D direction"}, {ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"}, {ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"}, {ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"}, {ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"}, {ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"}, {ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"}, {ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"}, }, { library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN } ) { description_ = " Conv3d operation. 
Output(Tensor5D) = alpha * Input(Tensor5D) * Filter(Tensor5D) + beta * Input(Tensor5D)"; } /// Destructor Conv3dOperationProfiler::~Conv3dOperationProfiler() { } /// Prints usage statement for the math function void Conv3dOperationProfiler::print_usage(std::ostream &out) const { out << "Conv3d" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void Conv3dOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular convolution (specify all the convolution parameters):\n" << " $ cutlass_profiler --operation=Conv3d" " --Activation=f16:ndhwc --Filter=f16:ndhwc --Output=f16 --accumulator-type=f32" " --n=32 --d=16 --h=14 --w=14 --c=8 --k=64 --t=3 --r=3 --s=3" " --pad_d=1 --pad_h=1 --pad_w=1" " --stride_d=1 --stride::h=1 --stride::w=1" " --dilation_d=1 --dilation::h=1 --dilation::w=1\n\n"; } #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif ///////////////////////////////////////////////////////////////////////////////////////////////// /// Total number of bytes loaded int64_t Conv3dOperationProfiler::Conv3dProblem::bytes(library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); // Input bytes read and Output bytes written for the gemm problem int64_t bytes_ = int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); // Output bytes read for the gemm problem for non-zero beta 
values if (!is_beta_zero) { bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); } return bytes_; } /// Total number of flops computed int64_t Conv3dOperationProfiler::Conv3dProblem::flops( library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2; int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2; // Adjust mainloop flop for dgrad strided if (operation_desc.conv_kind == library::ConvKind::kDgrad) { flops_mainloop_ = flops_mainloop_ / ( stride_d * stride_h * stride_w); } return (flops_mainloop_ + flops_epilogue_); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Extracts the problem dimensions Status Conv3dOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(operation->description()); if (!arg_as_int(problem_.n, "n", problem_space, problem)) { // default value problem_.n = 1; } if (!arg_as_int(problem_.d, "d", problem_space, problem)) { // default value problem_.d = 8; } if (!arg_as_int(problem_.h, "h", problem_space, problem)) { // default value problem_.h = 14; } if (!arg_as_int(problem_.w, "w", problem_space, problem)) { // default value problem_.w = 14; } if (!arg_as_int(problem_.c, "c", problem_space, problem)) { // default value problem_.c = 32; } if (!arg_as_int(problem_.k, "k", problem_space, problem)) { // default value problem_.k = 32; } if (!arg_as_int(problem_.t, "t", problem_space, problem)) { // default value problem_.t = 3; } if (!arg_as_int(problem_.r, "r", problem_space, problem)) { // default value problem_.r = 3; } if 
(!arg_as_int(problem_.s, "s", problem_space, problem)) {
    // default value
    problem_.s = 3;
  }

  if (!arg_as_int(problem_.pad_d, "pad_d", problem_space, problem)) {
    // default value
    problem_.pad_d = 1;
  }

  if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) {
    // default value
    problem_.pad_w = 1;
  }

  if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) {
    // default value
    problem_.pad_h = 1;
  }

  if (!arg_as_int(problem_.stride_d, "stride_d", problem_space, problem)) {
    // default value
    problem_.stride_d = 1;
  }

  if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) {
    // default value
    problem_.stride_h = 1;
  }

  if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) {
    // default value
    problem_.stride_w = 1;
  }

  if (!arg_as_int(problem_.dilation_d, "dilation_d", problem_space, problem)) {
    // default value
    problem_.dilation_d = 1;
  }

  if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) {
    // default value
    problem_.dilation_h = 1;
  }

  if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) {
    // default value
    problem_.dilation_w = 1;
  }

  //////////////////////// Convolution output dimensions z, p and q ///////////////////////
  // Cutlass convolutions support arbitrary output sizes and not constrained by          //
  // input, filter, padding, striding, dilation sizes.                                   //
  // cuDNN sets the output dimensions (p, q) using following equations:                  //
  //                                                                                     //
  //        output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
  //               where; div_up(a, b) : (a - 1)/b + 1                                   //
  //                                                                                     //
  // Thus, when output p and q dimensions are unspecified by the user                    //
  // cutlass profiler sets p and q which are cuDNN compliant.                            //
  //                                                                                     //
  /////////////////////////////////////////////////////////////////////////////////////////

  // set convolution output z
  if (!arg_as_int(problem_.z, "z", problem_space, problem)) {
    // default value (set using cudnn formula for output depth, when z is not provided)
    problem_.z = (
      problem_.d +
      2 * problem_.pad_d -
      ((problem_.t - 1) * problem_.dilation_d + 1)
    ) / (problem_.stride_d)
    + 1;
  }

  // set convolution output p
  if (!arg_as_int(problem_.p, "p", problem_space, problem)) {
    // default value (set using cudnn formula for output height, when p is not provided)
    problem_.p = (
      problem_.h +
      2 * problem_.pad_h -
      ((problem_.r - 1) * problem_.dilation_h + 1)
    ) / (problem_.stride_h)
    + 1;
  }

  // set convolution output q
  if (!arg_as_int(problem_.q, "q", problem_space, problem)) {
    // default value (set using cudnn formula for output width, when q is not provided)
    problem_.q = (
      problem_.w +
      2 * problem_.pad_w -
      ((problem_.s - 1) * problem_.dilation_w + 1)
    ) / (problem_.stride_w)
    + 1;
  }

  /////////////////////////////////////////////////////////////////////////////////////////

  if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) {
    // default value
    problem_.split_k_mode = library::SplitKMode::kSerial;
  }

  if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) {
    // default value
    problem_.split_k_slices = 1;
  }

  if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) {
    // default value
    problem_.conv_mode = library::ConvModeID::kCrossCorrelation;
  }

  if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) {
    // default value
    problem_.eq_gemm_provider = library::Provider::kNone;
  }

  // Reject problems the selected operation cannot express.
  if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  // Epilogue scalars: default alpha = 1, beta = 0 when unspecified.
  if (!arg_as_scalar(
    problem_.alpha,
    operation_desc.element_epilogue,
    "alpha",
    problem_space,
    problem)) {

    if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) {
      return Status::kErrorInternal;
    }
  }

  if (!arg_as_scalar(
    problem_.beta,
    operation_desc.element_epilogue,
    "beta",
    problem_space,
    problem)) {

    if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) {
      return Status::kErrorInternal;
    }
  }

  // initialize library::ConvConfiguration
  conv_workspace_.configuration.problem_size = conv::Conv3dProblemSize(
                                                int(problem_.n),
                                                int(problem_.d),
                                                int(problem_.h),
                                                int(problem_.w),
                                                int(problem_.c),
                                                int(problem_.k),
                                                int(problem_.t),
                                                int(problem_.r),
                                                int(problem_.s),
                                                int(problem_.z),
                                                int(problem_.p),
                                                int(problem_.q),
                                                int(problem_.pad_d),
                                                int(problem_.pad_h),
                                                int(problem_.pad_w),
                                                int(problem_.stride_d),
                                                int(problem_.stride_h),
                                                int(problem_.stride_w),
                                                int(problem_.dilation_d),
                                                int(problem_.dilation_h),
                                                int(problem_.dilation_w),
                                                static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)),
                                                int(problem_.split_k_slices),
                                                1 // groups
                                              );

  conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode));

  // Packed NDHWC strides for activations: {c, w*c, h*w*c, d*h*w*c}.
  conv_workspace_.configuration.layout_activations.stride() = make_Coord(
    int(problem_.c),
    int(problem_.w) * int(problem_.c),
    int(problem_.h) * int(problem_.w) * int(problem_.c),
    int(problem_.d) * int(problem_.h) * int(problem_.w) * int(problem_.c)
  );

  // Packed KTRSC strides for filters.
  conv_workspace_.configuration.layout_filters.stride() = make_Coord(
    int(problem_.c),
    int(problem_.s) * int(problem_.c),
    int(problem_.r) * int(problem_.s) * int(problem_.c),
    int(problem_.t) * int(problem_.r) * int(problem_.s) * int(problem_.c)
  );

  // Packed NZPQK strides for output.
  conv_workspace_.configuration.layout_output.stride() = make_Coord(
    int(problem_.k),
    int(problem_.q) * int(problem_.k),
    int(problem_.q) * int(problem_.p) * int(problem_.k),
    int(problem_.z) * int(problem_.q) * int(problem_.p) * int(problem_.k)
  );

  // initialize library::ConvArguments (pointers are bound later in
  // set_cutlass_operator_arguments_())
  conv_workspace_.arguments.A = nullptr;
  conv_workspace_.arguments.B = nullptr;
  conv_workspace_.arguments.C = nullptr;
  conv_workspace_.arguments.D = nullptr;
  conv_workspace_.arguments.alpha = problem_.alpha.data();
  conv_workspace_.arguments.beta = problem_.beta.data();
  conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;

  // initialize reduction operation for parallel splitKMode not supported for conv3d
  if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
    if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) {
      return Status::kErrorInternal;
    }
  }

  initialize_result_(this->model_result_, options, operation_desc, problem_space);

  return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments);
}

/// Initializes the performance result
void Conv3dOperationProfiler::initialize_result_(
  PerformanceResult &result,
  Options const &options,
  library::ConvDescription const &operation_desc,
  ProblemSpace const &problem_space) {

  result.provider = library::Provider::kCUTLASS;
  result.disposition = Disposition::kNotRun;
  result.status = Status::kSuccess;
  result.operation_name = operation_desc.name;

  result.arguments.resize(problem_space.rank());

  // Record operand descriptions as "element:layout" strings.
  set_argument(result, "Activation", problem_space,
    std::string(library::to_string(operation_desc.activation().element)) + ":" + library::to_string(operation_desc.activation().layout));

  set_argument(result, "Filter", problem_space,
    std::string(library::to_string(operation_desc.filter().element)) + ":" + library::to_string(operation_desc.filter().layout));

  set_argument(result, "Output", problem_space,
    std::string(library::to_string(operation_desc.output().element)) + ":" + library::to_string(operation_desc.output().layout));

  set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind));

  set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm)));

  // Problem-space dimensions as resolved (including computed z/p/q defaults).
  set_argument(result, "n", problem_space, problem_.n);
  set_argument(result, "d", problem_space, problem_.d);
  set_argument(result, "h", problem_space, problem_.h);
  set_argument(result, "w", problem_space, problem_.w);
  set_argument(result, "c", problem_space, problem_.c);

  set_argument(result, "k", problem_space, problem_.k);
  set_argument(result, "t", problem_space, problem_.t);
  set_argument(result, "r", problem_space, problem_.r);
  set_argument(result, "s", problem_space, problem_.s);

  set_argument(result, "z", problem_space, problem_.z);
  set_argument(result, "p", problem_space, problem_.p);
  set_argument(result, "q", problem_space, problem_.q);

  set_argument(result, "pad_d", problem_space, problem_.pad_d);
  set_argument(result, "pad_h", problem_space, problem_.pad_h);
  set_argument(result, "pad_w", problem_space, problem_.pad_w);

  set_argument(result, "stride_d", problem_space, problem_.stride_d);
  set_argument(result, "stride_h", problem_space, problem_.stride_h);
  set_argument(result, "stride_w", problem_space, problem_.stride_w);

  set_argument(result, "dilation_d", problem_space, problem_.dilation_d);
  set_argument(result, "dilation_h", problem_space, problem_.dilation_h);
  set_argument(result, "dilation_w", problem_space, problem_.dilation_w);

  set_argument(result, "split_k_mode", problem_space,
    std::string(library::to_string(problem_.split_k_mode)));
  set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices);

  set_argument(result, "conv_mode", problem_space,
    std::string(library::to_string(problem_.conv_mode)));

  set_argument(result, "alpha", problem_space,
    library::lexical_cast(problem_.alpha, operation_desc.element_epilogue));

  set_argument(result, "beta", problem_space,
    library::lexical_cast(problem_.beta, operation_desc.element_epilogue));

  set_argument(result, "eq_gemm_provider", problem_space,
    std::string(library::to_string(problem_.eq_gemm_provider)));

  OperationProfiler::initialize_result_(result, operation_desc, problem_space);

  // Bytes of activation, filter, and output tensors
  result.bytes = problem_.bytes(operation_desc);

  // Theoretical flops required for the computation
  result.flops = problem_.flops(operation_desc);

  // Measured runtime
  result.runtime = 0;
}

/// Initialize reduction problem dimensions and library::Operation
bool Conv3dOperationProfiler::initialize_reduction_configuration_(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  library::ConvDescription const &conv_desc =
    static_cast<library::ConvDescription const &>(operation->description());

  library::ConvKind const &conv_kind = conv_desc.conv_kind;

  // Parallel split-K runs the main kernel with alpha=1, beta=0; the final
  // reduction applies the user's alpha/beta.
  if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) {
    return false;
  }

  if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) {
    return false;
  }

  /// This chooses the appropriate stride element of the row-major C tensor.
  int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ?
3 : 0);

  /// initialize library::ReductionConfiguration
  conv_workspace_.reduction_configuration.problem_size     = problem_.eq_gemm_size(conv_kind).mn();
  conv_workspace_.reduction_configuration.partitions       = int(problem_.split_k_slices);
  conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product();
  conv_workspace_.reduction_configuration.ldw  = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx];
  conv_workspace_.reduction_configuration.lds  = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx];
  conv_workspace_.reduction_configuration.ldd  = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx];

  // find reduction operation
  library::ReductionFunctionalKey reduction_key(
    library::Provider::kCUTLASS,
    conv_desc.tile_description.math_instruction.element_accumulator,  // element workspace
    conv_desc.tile_description.math_instruction.element_accumulator,  // element accumulator
    conv_desc.C.element,                                              // element output
    conv_desc.element_epilogue                                        // element compute
  );

#if 0 // debug print to check which reduction instance is selected
  std::cout << reduction_key << "\n";
#endif

  auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key);

  if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) {
    return false;
  }

  // initialize reduction operation required for parallel split-k conv2d operator
  reduction_op_ = reduction_it->second;

  // reduction operation found and initialized
  return true;
}

/// Initializes workspace
Status Conv3dOperationProfiler::initialize_workspace(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  // initialize conv2d underlying operation to handle parallel reduction
  library::Operation const* underlying_operation = operation;

  if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
    if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
      return Status::kErrorNotSupported;
    }
  }

  library::ConvDescription const &operation_desc =
    static_cast<library::ConvDescription const &>(underlying_operation->description());

  // Compute the number of copies of the problem to avoid L2 camping.
  if (!options.profiling.workspace_count) {
    int64_t bytes = problem_.bytes(operation_desc);
    if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) {
      conv_workspace_.problem_count =
        1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes);
    }
    else {
      conv_workspace_.problem_count = 1;
    }
  }
  else {
    conv_workspace_.problem_count = options.profiling.workspace_count;
  }

  if (options.execution_mode != ExecutionMode::kDryRun) {

    // A, B, C are allocated with 'options' so they receive initialized data;
    // Computed and Reference use the overload without initialization since
    // they are overwritten before being read.
    conv_workspace_.A = device_context.allocate_tensor(
      options,
      "A",
      operation_desc.A.element,
      operation_desc.A.layout,
      problem_.extent_a(operation_desc.conv_kind),
      conv_workspace_.stride_a(operation_desc.conv_kind),
      conv_workspace_.problem_count
    );

    conv_workspace_.B = device_context.allocate_tensor(
      options,
      "B",
      operation_desc.B.element,
      operation_desc.B.layout,
      problem_.extent_b(operation_desc.conv_kind),
      conv_workspace_.stride_b(operation_desc.conv_kind),
      conv_workspace_.problem_count
    );

    conv_workspace_.C = device_context.allocate_tensor(
      options,
      "C",
      operation_desc.C.element,
      operation_desc.C.layout,
      problem_.extent_c(operation_desc.conv_kind),
      conv_workspace_.stride_c(operation_desc.conv_kind),
      conv_workspace_.problem_count
    );

    conv_workspace_.Computed = device_context.allocate_tensor(
      "D",
      operation_desc.C.element,
      operation_desc.C.layout,
      problem_.extent_c(operation_desc.conv_kind),
      conv_workspace_.stride_c(operation_desc.conv_kind),
      conv_workspace_.problem_count
    );

    conv_workspace_.Reference = device_context.allocate_tensor(
      "Reference",
      operation_desc.C.element,
      operation_desc.C.layout,
      problem_.extent_c(operation_desc.conv_kind),
      conv_workspace_.stride_c(operation_desc.conv_kind),
      conv_workspace_.problem_count
    );
  }

  //
  // Initialize the CUTLASS operation
  //
  Status status = Status::kSuccess;

  if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {

    if (options.execution_mode != ExecutionMode::kDryRun) {

      uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration);
      conv_workspace_.host_workspace.resize(workspace_size, 0);

      workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration);
      conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);

      status = underlying_operation->initialize(
        &conv_workspace_.configuration,
        conv_workspace_.host_workspace.data(),
        conv_workspace_.device_workspace.data());

      if (status != Status::kSuccess) {
        return status;
      }

      if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
        workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration);
        conv_workspace_.reduction_host_workspace.resize(workspace_size, 0);

        status = reduction_op_->initialize(
          &conv_workspace_.reduction_configuration,
          conv_workspace_.reduction_host_workspace.data(),
          nullptr);

        if (status != Status::kSuccess) {
          return status;
        }
      }
    }

    //
    // If CUTLASS is enabled, generate a result for it
    //
    results_.push_back(model_result_);
    results_.back().provider = library::Provider::kCUTLASS;
    results_.back().op_kind = library::OperationKind::kConv3d;
    results_.back().disposition = Disposition::kNotRun;

    for(auto provider : verification_providers_) {
      results_.back().verification_map[provider] = Disposition::kNotRun;
    }
  }

  return status;
}

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Verifies CUTLASS against references
bool Conv3dOperationProfiler::verify_cutlass(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
    return true;
  }

  if (options.execution_mode == ExecutionMode::kDryRun) {
    return true;
  }

  cudaError_t result;

  // Initialize structure containing Conv arguments
  set_cutlass_operator_arguments_();

  conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data());

  //
  // Run the CUTLASS operation
  //
  // initialize conv2d underlying operation to handle parallel reduction
  library::Operation const* underlying_operation = operation;

  if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
    if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
      results_.back().disposition = Disposition::kFailed;
      return false;
    }
  }

#if 0
  std::cout << "profiling : " << std::endl
            << "conv2d : " << operation->description().name << std::endl
            << "underlying conv2d : " << underlying_operation->description().name << std::endl
            << "reduction : " << reduction_op_->description().name << std::endl;
#endif

  // run cutlass conv2d operation
  results_.back().status = underlying_operation->run(
    &conv_workspace_.arguments,
    conv_workspace_.host_workspace.data(),
    conv_workspace_.device_workspace.data());

  if (results_.back().status != Status::kSuccess) {
    results_.back().disposition = Disposition::kFailed;
    return false;
  }

  // Run parallel reduction kernel for parallel split_k_mode
  if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {

    results_.back().status = reduction_op_->run(
      &conv_workspace_.reduction_arguments,
      conv_workspace_.reduction_host_workspace.data(),
      nullptr);

    if (results_.back().status != Status::kSuccess) {
      results_.back().disposition = Disposition::kFailed;
      return false;
    }
  }

  // Synchronize before running device reference
  result = cudaDeviceSynchronize();

  if (result != cudaSuccess) {
    results_.back().disposition = Disposition::kFailed;
    return false;
  }

  // CUTLASS op ran but has not yet been verified against any verification provider
  results_.back().disposition = Disposition::kNotVerified;

  //
  // Run verification providers
  //

  if (options.verification.enabled) {

#if CUTLASS_ENABLE_CUDNN
    // Run verification cudnn reference
    if (options.verification.provider_enabled(library::Provider::kCUDNN)) {

      // Guard against unsupported cases
      auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description());

      Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration);

      // Initialize reference data to the source data
      conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());

      if (status == Status::kSuccess) {
        // call cudnn verification if supported
        verify_with_cudnn_(
          options,
          report,
          device_context,
          operation,
          problem_space,
          problem);
      }
      else if (status == Status::kErrorInvalidProblem) {
        results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem;
      }
      else {
        // set verification map for cudnn to not supported
        results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
      }
    }
#endif // #if CUTLASS_ENABLE_CUDNN

    // Run verification host reference
    if (options.verification.provider_enabled(library::Provider::kReferenceHost)) {

      // Restore reference data back to initial source data
      conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());

      verify_with_host_reference_(
        options,
        report,
        device_context,
        operation,
        problem_space,
        problem);
    }

    // Update disposition to worst case verification outcome among all
    // verification providers which are supported
    bool is_any_verification_run_passed = false;

    for(auto &m : results_.back().verification_map) {
      // A failure or mismatch from any provider is terminal for this result.
      if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
        results_.back().disposition = m.second;
        return true;
      }
      if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
        is_any_verification_run_passed = true;
      }
    }

    if(is_any_verification_run_passed) {
      results_.back().disposition = Disposition::kPassed;
    }
  }

  // Return true means continue profiling
  return true;
}

/// Verifies CUTLASS against host reference
bool Conv3dOperationProfiler::verify_with_host_reference_(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  Status status;

  //
  // Find host reference operation using conv functional description key
  //
  library::OperationDescription const &desc = operation->description();

  auto &conv_desc = static_cast<library::ConvDescription const &>(desc);

  library::ConvFunctionalKey conv_key(
    library::Provider::kReferenceHost,
    conv_desc.conv_kind,
    conv_desc.A.element,
    conv_desc.A.layout,
    conv_desc.B.element,
    conv_desc.B.layout,
    conv_desc.C.element,
    conv_desc.C.layout,
    conv_desc.tile_description.math_instruction.element_accumulator,
    conv_desc.element_epilogue);

#if 0 // debug print to check which host reference instance is selected
  std::cout << conv_key << "\n";
#endif

  auto operators_it = Singleton::get().operation_table.conv3d_operations.find(conv_key);

  if(operators_it == Singleton::get().operation_table.conv3d_operations.end()) {
    results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
    return true;
  }

  // conv3d host reference minimum cc is 0 (CPU) and no iterator algorithm
  library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone);

  auto cc_it = operators_it->second.find(preference_key);

  if(cc_it == operators_it->second.end()) {
    results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
    return true;
  }

  // host reference has only one instances in ConvOperationVectorMap
  library::Operation const *reference_op = cc_it->second[0];

  //
  // Copy input tensors A, B, and C from device to host buffers
  //
  conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes());
conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes());
  conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes());

  conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data());
  conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data());
  conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data());

  //
  // Initialize structure containing Conv3d arguments
  //
  // The host reference writes D in place over the host copy of C.
  conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data();
  conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data();
  conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data();
  conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data();

  conv_workspace_.arguments.alpha = problem_.alpha.data();
  conv_workspace_.arguments.beta = problem_.beta.data();
  conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;

  //
  // Initialize host reference operation
  //
  std::vector<uint8_t> host_workspace_reference_op;

  uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration);
  host_workspace_reference_op.resize(workspace_size, 0);

  reference_op->initialize(
    &conv_workspace_.configuration,
    host_workspace_reference_op.data());

  //
  // Run host reference operation
  //
  status = reference_op->run(
    &conv_workspace_.arguments,
    host_workspace_reference_op.data());

  // Handle errors
  if (status != Status::kSuccess) {
    results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified;
    return true;
  }

  //
  // Copy host reference output to device memory for equality check on device
  //
  conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D);

  //
  // Verify results
  //
  results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors(
    options,
    *conv_workspace_.Computed,
    *conv_workspace_.Reference,
    conv_workspace_.Computed->batch_stride()
  );

  // Save workspace if incorrect
  if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
    results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) {

    save_workspace(
      device_context,
      options,
      static_cast<library::ConvDescription const &>(operation->description()),
      library::Provider::kCUTLASS,
      library::Provider::kReferenceHost);
  }

  // Return true means continue profiling
  return true;
}

/// Verifies CUTLASS against device reference
bool Conv3dOperationProfiler::verify_with_device_reference_(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  // TODO: verify cutlass conv3d against device reference

  // Return true means continue profiling
  return true;
}

/// Measures performance results
bool Conv3dOperationProfiler::profile(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {

    set_cutlass_operator_arguments_();

    results_.back().status = profile_cutlass_(
      results_.back().runtime,
      options,
      operation,
      &conv_workspace_.arguments,
      conv_workspace_.host_workspace.data(),
      conv_workspace_.device_workspace.data()
    );
  }
  return true;
}

/// Updates the arguments structure for the CUTLASS operator based on
/// the problem index.
void Conv3dOperationProfiler::set_cutlass_operator_arguments_(int problem_idx) {
  // Initialize structure containing Conv3d arguments
  conv_workspace_.arguments.A = conv_workspace_.A->batch_data(problem_idx);
  conv_workspace_.arguments.B = conv_workspace_.B->batch_data(problem_idx);
  conv_workspace_.arguments.C = conv_workspace_.C->batch_data(problem_idx);
  conv_workspace_.arguments.D = conv_workspace_.Computed->batch_data(problem_idx);
  conv_workspace_.arguments.alpha = problem_.alpha.data();
  conv_workspace_.arguments.beta = problem_.beta.data();
  conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;

  if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
    // update library::ConvArguments for parallel split-k reduction:
    // the conv kernel accumulates into the workspace with alpha=1, beta=0;
    // the reduction kernel then applies the user's alpha/beta.
    conv_workspace_.arguments.D = conv_workspace_.device_workspace.data();
    conv_workspace_.arguments.alpha = problem_.alpha_one.data();
    conv_workspace_.arguments.beta = problem_.beta_zero.data();

    /// initialize library::ReductionArguments
    conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
    conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx);
    conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx);
    conv_workspace_.reduction_arguments.alpha = problem_.alpha.data();
    conv_workspace_.reduction_arguments.beta = problem_.beta.data();
    conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
  }
}

/// Method to profile a CUTLASS Operation
Status Conv3dOperationProfiler::profile_cutlass_(
  double &runtime,
  Options const &options,
  library::Operation const *operation,
  void *arguments,
  void *host_workspace,
  void *device_workspace) {

  GpuTimer timer;

  // initialize conv2d underlying operation to handle parallel reduction
  library::Operation const* underlying_operation = operation;

  if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
    if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
      return Status::kErrorNotSupported;
    }
  }

  //
  // Optional sleep to limit power consumption and thermals
  //
  sleep(options.profiling.sleep_duration);

  //
  // Warmup loop
  //
  Status status;

  for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {

    // Setup rotating workspace (offset past the timed iterations' indices)
    int workspace_idx = options.profiling.warmup_iterations + iteration;
    int problem_idx = (workspace_idx % conv_workspace_.problem_count);

    set_cutlass_operator_arguments_(problem_idx);

    // Run underlying conv2d operation
    status = underlying_operation->run(
      arguments,
      host_workspace,
      device_workspace);

    // Run parallel reduction kernel for parallel split_k_mode
    if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
      status = reduction_op_->run(
        &conv_workspace_.reduction_arguments,
        conv_workspace_.reduction_host_workspace.data(),
        nullptr);
    }

    if (status != Status::kSuccess) {
      return status;
    }
  }

  //
  // Initialize GPU timer
  //
  timer.start();

  //
  // Profiling loop
  //
  int Iterations = options.profiling.iterations;

  int iteration = 0;
  for (; iteration < Iterations; ++iteration) {

    // Setup rotating workspace to avoid L2 residency skewing measurements
    int problem_idx = (iteration % conv_workspace_.problem_count);

    set_cutlass_operator_arguments_(problem_idx);

    // Run underlying conv2d operation
    status = underlying_operation->run(
      arguments,
      host_workspace,
      device_workspace);

    // Run parallel reduction kernel for parallel split_k_mode
    if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
      status = reduction_op_->run(
        &conv_workspace_.reduction_arguments,
        conv_workspace_.reduction_host_workspace.data(),
        nullptr);
    }

    if (status != Status::kSuccess) {
      return status;
    }
  }

  //
  // Wait for completion
  //
  timer.stop_and_wait();

  //
  // Update performance result
  //
  runtime = timer.duration(iteration);

  return status;
}

/////////////////////////////////////////////////////////////////////////////////////////////////

#if
CUTLASS_ENABLE_CUDNN /// Verifies CUTLASS against cudnn reference bool Conv3dOperationProfiler::verify_with_cudnn_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); // // Construct cudnn operators // CudnnCreate handle; cudnnStatus_t status = handle.get_cudnn_create_status(); if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Initialize state // // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.D = conv_workspace_.Reference->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // cuDNN does not support four tensor arguments, so we copy the tensor C data into // tensor D. 
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); conv_workspace_.arguments.C = conv_workspace_.arguments.D; try { // // Construct dispatcher to cudnn operator // detail::cudnnConvDispatcher conv_op( conv_desc, conv_workspace_.configuration, conv_workspace_.arguments, handle ); if (conv_op.status != Status::kSuccess) { if (conv_op.status == Status::kErrorNotSupported) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } else { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } return true; } status = conv_op(handle); // Handle errors if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Verify results // results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) { save_workspace( device_context, options, conv_desc, library::Provider::kCUTLASS, library::Provider::kCUDNN); } } catch (...) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } // Return true means continue profiling return true; } #endif // #if CUTLASS_ENABLE_CUDNN ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
b4ce3f8ce35817ddf2c9c13e95be8d049b937b0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void saxpy(int * a, int * b, int * c) { // Determine our unique global thread ID, so we know which element to process int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = tid; i < N; i += stride) c[i] = 2 * a[i] + b[i]; }
b4ce3f8ce35817ddf2c9c13e95be8d049b937b0e.cu
#include "includes.h" __global__ void saxpy(int * a, int * b, int * c) { // Determine our unique global thread ID, so we know which element to process int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = tid; i < N; i += stride) c[i] = 2 * a[i] + b[i]; }
e8e63672cc3186ca073991ddef13f23473f0efb6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <algorithm> #include <cmath> #include <cstdio> #include <iostream> #include <sstream> #include <omp.h> #include <roctracer/roctx.h> #define BLOCK_DIM_X 32 #define BLOCK_DIM_Y 32 #define CUDA_RT_CALL(call) \ { \ hipError_t cudaStatus = call; \ if (hipSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, hipGetErrorString(cudaStatus), cudaStatus); \ } constexpr int MAX_NUM_DEVICES = 32; constexpr float tol = 1.0e-8; const float PI = 2.0 * std::asin(1.0); __global__ void initialize_boundaries(float* a_new, float* a, const float pi, const int offset, const int nx, const int my_ny, const int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) { const float y0 = sin(2.0 * pi * (offset + iy) / (ny - 1)); a[iy * nx + 0] = y0; a[iy * nx + (nx - 1)] = y0; a_new[iy * nx + 0] = y0; a_new[iy * nx + (nx - 1)] = y0; } } __global__ void jacobi_kernel(float* a_new, const float* a, float* l2_norm, const int iy_start, const int iy_end, const int nx) { int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start; int ix = blockIdx.x * blockDim.x + threadIdx.x + 1; __shared__ float block_l2_sum[BLOCK_DIM_X*BLOCK_DIM_Y]; unsigned thread_index = threadIdx.y*BLOCK_DIM_X + threadIdx.x; if (iy < iy_end && ix < (nx - 1)) { // Update grid point const float new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; float residue = new_val - a[iy * nx + ix]; // Set block-level L2 norm value for this grid point block_l2_sum[thread_index] = residue * residue; } else { block_l2_sum[thread_index] = 0; } // Reduce L2 norm for the block in parallel for (unsigned stride = 1; stride < BLOCK_DIM_X*BLOCK_DIM_Y; stride *= 2) { __syncthreads(); if ((thread_index) % (2*stride) == 0) { block_l2_sum[thread_index] += block_l2_sum[thread_index + stride]; } } // Atomically 
update global L2 norm with block-reduced L2 norm if (thread_index == 0) { atomicAdd(l2_norm, block_l2_sum[0]); } } int get_argval(char** begin, char** end, const std::string& arg, const int default_val) { int argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } int get_parsed_vals(char** begin, char **end, int* devices, const std::string& arg, const int default_val) { int numGPUs = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { numGPUs = 0; std::string dev_ids(*itr); int currpos = 0, nextpos = 0; do { nextpos = dev_ids.find_first_of(",", currpos); devices[numGPUs] = stoi(dev_ids.substr(currpos, nextpos)); numGPUs++; currpos = nextpos + 1; } while (nextpos != std::string::npos); } else { for (int i = 0; i < numGPUs; i++) { devices[i] = i; } } return numGPUs; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } double single_gpu(const int nx, const int ny, const int iter_max, float* const a_ref_h); int main(int argc, char* argv[]) { const int iter_max = get_argval(argv, argv + argc, "-niter", 1000); const int nx = get_argval(argv, argv + argc, "-nx", 16384); const int ny = get_argval(argv, argv + argc, "-ny", 16384); const bool p2p = get_arg(argv, argv + argc, "-p2p"); // Get GPU mapping from runtime arguments int available_devices = 0; CUDA_RT_CALL(hipGetDeviceCount(&available_devices)); int devices[MAX_NUM_DEVICES]; int num_devices = get_parsed_vals(argv, argv + argc, devices, "-gpus", available_devices); float* a[MAX_NUM_DEVICES]; float* a_new[MAX_NUM_DEVICES]; float* a_ref_h; float* a_h; double runtime_serial = 0.0; float* l2_norm_d[MAX_NUM_DEVICES]; float* l2_norm_h[MAX_NUM_DEVICES]; int iy_start[MAX_NUM_DEVICES]; int iy_end[MAX_NUM_DEVICES]; int chunk_size[MAX_NUM_DEVICES]; // Compute chunk size and allocate 
memory on GPUs for (int dev_id = 0; dev_id < num_devices; ++dev_id) { CUDA_RT_CALL(hipSetDevice(devices[dev_id])); CUDA_RT_CALL(hipFree(0)); if (0 == dev_id) { // Allocate memory on host and record single-GPU timings CUDA_RT_CALL(hipHostMalloc(&a_ref_h, nx * ny * sizeof(float))); CUDA_RT_CALL(hipHostMalloc(&a_h, nx * ny * sizeof(float))); runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h); } // ny - 2 rows are distributed amongst `size` ranks in such a way // that each rank gets either (ny - 2) / size or (ny - 2) / size + 1 rows. // This optimizes load balancing when (ny - 2) % size != 0 int chunk_size_low = (ny - 2) / num_devices; int chunk_size_high = chunk_size_low + 1; // To calculate the number of ranks that need to compute an extra row, // the following formula is derived from this equation: // num_ranks_low * chunk_size_low + (size - num_ranks_low) * (chunk_size_low + 1) = (ny - 2) int num_ranks_low = num_devices * chunk_size_low + num_devices - (ny - 2); if (dev_id < num_ranks_low) chunk_size[dev_id] = chunk_size_low; else chunk_size[dev_id] = chunk_size_high; // Allocate memory on GPU CUDA_RT_CALL(hipMalloc(a + dev_id, nx * (chunk_size[dev_id] + 2) * sizeof(float))); CUDA_RT_CALL(hipMalloc(a_new + dev_id, nx * (chunk_size[dev_id] + 2) * sizeof(float))); CUDA_RT_CALL(hipMemset(a[dev_id], 0, nx * (chunk_size[dev_id] + 2) * sizeof(float))); CUDA_RT_CALL(hipMemset(a_new[dev_id], 0, nx * (chunk_size[dev_id] + 2) * sizeof(float))); // Calculate local domain boundaries int iy_start_global; // My start index in the global array if (dev_id < num_ranks_low) { iy_start_global = dev_id * chunk_size_low + 1; } else { iy_start_global = num_ranks_low * chunk_size_low + (dev_id - num_ranks_low) * chunk_size_high + 1; } iy_start[dev_id] = 1; iy_end[dev_id] = iy_start[dev_id] + chunk_size[dev_id]; // Set dirichlet boundary conditions on left and right boarder hipLaunchKernelGGL(( initialize_boundaries), dim3((ny / num_devices) / 128 + 1), dim3(128), 0, 0, a[dev_id], 
a_new[dev_id], PI, iy_start_global - 1, nx, (chunk_size[dev_id] + 2), ny); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipDeviceSynchronize()); CUDA_RT_CALL(hipMalloc(l2_norm_d + dev_id, sizeof(float))); CUDA_RT_CALL(hipHostMalloc(l2_norm_h + dev_id, sizeof(float))); if (p2p == true) { const int top = dev_id > 0 ? dev_id - 1 : (num_devices - 1); int canAccessPeer = 0; // TODO: Part 2- Check whether GPU "devices[dev_id]" can access peer "devices[top]" CUDA_RT_CALL(hipDeviceCanAccessPeer(&canAccessPeer, devices[dev_id], devices[top])); if (canAccessPeer) { // TODO: Part 2- Enable peer access from GPU "devices[dev_id]" to "devices[top]" CUDA_RT_CALL(hipDeviceEnablePeerAccess(devices[top], 0)); } const int bottom = (dev_id + 1) % num_devices; if (top != bottom) { canAccessPeer = 0; // TODO: Part 2- Check and enable peer access from GPU "devices[dev_id]" to // "devices[bottom]", whenever possible CUDA_RT_CALL(hipDeviceCanAccessPeer(&canAccessPeer, devices[dev_id], devices[bottom])); if (canAccessPeer) { CUDA_RT_CALL(hipDeviceEnablePeerAccess(devices[bottom], 0)); } } } CUDA_RT_CALL(hipDeviceSynchronize()); } // Share initial top and bottom local grid-point values between neighbours for (int dev_id = 0; dev_id < num_devices; ++dev_id) { CUDA_RT_CALL(hipSetDevice(devices[dev_id])); const int top = dev_id > 0 ? 
dev_id - 1 : (num_devices - 1); const int bottom = (dev_id + 1) % num_devices; CUDA_RT_CALL(hipMemcpy(a_new[top] + (iy_end[top] * nx), a_new[dev_id] + iy_start[dev_id] * nx, nx * sizeof(float), hipMemcpyDeviceToDevice)); CUDA_RT_CALL(hipMemcpy(a_new[bottom], a_new[dev_id] + (iy_end[dev_id] - 1) * nx, nx * sizeof(float), hipMemcpyDeviceToDevice)); } for (int dev_id = 0; dev_id < num_devices; ++dev_id) { CUDA_RT_CALL(hipSetDevice(devices[dev_id])); CUDA_RT_CALL(hipDeviceSynchronize()); } printf("Jacobi relaxation: %d iterations on %d x %d mesh\n", iter_max, nx, ny); dim3 dim_block(BLOCK_DIM_X, BLOCK_DIM_Y, 1); int iter = 0; float l2_norm = 1.0; double start = omp_get_wtime(); roctxRangePush("Jacobi solve"); while (l2_norm > tol && iter < iter_max) { // Launch device kernel on each GPU for (int dev_id = 0; dev_id < num_devices; ++dev_id) { // TODO: Part 1- Set current GPU to be "devices[dev_id]" CUDA_RT_CALL(hipSetDevice(devices[dev_id])); CUDA_RT_CALL(hipMemsetAsync(l2_norm_d[dev_id], 0, sizeof(float))); dim3 dim_grid((nx + BLOCK_DIM_X - 1) / BLOCK_DIM_X, (chunk_size[dev_id] + BLOCK_DIM_Y - 1) / BLOCK_DIM_Y, 1); // TODO: Part 1- Call Jacobi kernel with "dim_grid" blocks in grid and "dim_block" // blocks per thread. "dev_id" variable points to corresponding memory allocated // for the current GPU. hipLaunchKernelGGL(( jacobi_kernel), dim3(dim_grid), dim3(dim_block), 0, 0, a_new[dev_id], a[dev_id], l2_norm_d[dev_id], iy_start[dev_id], iy_end[dev_id], nx); // TODO: Part 1- Copy GPU-local L2 norm "l2_norm_d" back to CPU "l2_norm_h" CUDA_RT_CALL(hipMemcpyAsync(l2_norm_h[dev_id], l2_norm_d[dev_id], sizeof(float), hipMemcpyDeviceToHost)); } // Launch async memory copy operations for halo exchange and // for copying local-grid L2 norm from each GPU to host for (int dev_id = 0; dev_id < num_devices; ++dev_id) { const int top = dev_id > 0 ? 
dev_id - 1 : (num_devices - 1); const int bottom = (dev_id + 1) % num_devices; // TODO: Part 1- Set current GPU CUDA_RT_CALL(hipSetDevice(devices[dev_id])); // TODO: Part 1- Implement halo exchange with top neighbour "top" CUDA_RT_CALL(hipMemcpyAsync(a_new[top] + (iy_end[top] * nx), a_new[dev_id] + iy_start[dev_id] * nx, nx * sizeof(float), hipMemcpyDeviceToDevice)); // TODO: Part 1- Implement halo exchange with bottom neighbour "bottom" CUDA_RT_CALL(hipMemcpyAsync(a_new[bottom], a_new[dev_id] + (iy_end[dev_id] - 1) * nx, nx * sizeof(float), hipMemcpyDeviceToDevice)); } l2_norm = 0.0; // Synchronize devices and compute global L2 norm for (int dev_id = 0; dev_id < num_devices; ++dev_id) { // TODO: part 1- Set current GPU and call hipDeviceSynchronize() CUDA_RT_CALL(hipSetDevice(devices[dev_id])); CUDA_RT_CALL(hipDeviceSynchronize()); l2_norm += *(l2_norm_h[dev_id]); } l2_norm = std::sqrt(l2_norm); iter++; if ((iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm); for (int dev_id = 0; dev_id < num_devices; ++dev_id) { std::swap(a_new[dev_id], a[dev_id]); } } roctxRangePop(); double stop = omp_get_wtime(); int offset = nx; // Copy computed grid back to host from each GPU for (int dev_id = 0; dev_id < num_devices; ++dev_id) { CUDA_RT_CALL( hipMemcpy(a_h + offset, a[dev_id] + nx, ::min((nx * ny) - offset, nx * chunk_size[dev_id]) * sizeof(float), hipMemcpyDeviceToHost)); offset += ::min(chunk_size[dev_id] * nx, (nx * ny) - offset); } // Compare against single GPU execution for correctness bool result_correct = true; for (int iy = 1; result_correct && (iy < (ny - 1)); ++iy) { for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) { if (::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) { fprintf(stderr, "ERROR: a[%d * %d + %d] = %f does not match %f " "(reference)\n", iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]); result_correct = false; } } } if (result_correct) { printf("Num GPUs: %d. 
Using GPU ID: ", num_devices); for (int i = 0; i < num_devices; i++) { printf("%d, ", devices[i]); } printf( "\n%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, " "efficiency: %8.2f \n", ny, nx, runtime_serial, num_devices, (stop - start), runtime_serial / (stop - start), runtime_serial / (num_devices * (stop - start)) * 100); } for (int dev_id = (num_devices - 1); dev_id >= 0; --dev_id) { CUDA_RT_CALL(hipSetDevice(dev_id)); CUDA_RT_CALL(hipHostFree(l2_norm_h[dev_id])); CUDA_RT_CALL(hipFree(l2_norm_d[dev_id])); CUDA_RT_CALL(hipFree(a_new[dev_id])); CUDA_RT_CALL(hipFree(a[dev_id])); if (0 == dev_id) { CUDA_RT_CALL(hipHostFree(a_h)); CUDA_RT_CALL(hipHostFree(a_ref_h)); } } return result_correct ? 0 : 1; } double single_gpu(const int nx, const int ny, const int iter_max, float* const a_ref_h) { float* a; float* a_new; float* l2_norm_d; float* l2_norm_h; int iy_start = 1; int iy_end = (ny - 1); CUDA_RT_CALL(hipMalloc(&a, nx * ny * sizeof(float))); CUDA_RT_CALL(hipMalloc(&a_new, nx * ny * sizeof(float))); CUDA_RT_CALL(hipMemset(a, 0, nx * ny * sizeof(float))); CUDA_RT_CALL(hipMemset(a_new, 0, nx * ny * sizeof(float))); // Set diriclet boundary conditions on left and right boarder roctxRangePush("Init boundaries"); hipLaunchKernelGGL(( initialize_boundaries), dim3(ny / 128 + 1), dim3(128), 0, 0, a, a_new, PI, 0, nx, ny, ny); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipDeviceSynchronize()); roctxRangePop(); CUDA_RT_CALL(hipMalloc(&l2_norm_d, sizeof(float))); CUDA_RT_CALL(hipHostMalloc(&l2_norm_h, sizeof(float))); CUDA_RT_CALL(hipDeviceSynchronize()); printf("Single GPU jacobi relaxation: %d iterations on %d x %d mesh\n", iter_max, nx, ny); dim3 dim_grid((nx + BLOCK_DIM_X - 1) / BLOCK_DIM_X, (ny + BLOCK_DIM_Y - 1) / BLOCK_DIM_Y, 1); dim3 dim_block(BLOCK_DIM_X, BLOCK_DIM_Y, 1); int iter = 0; float l2_norm = 1.0; double start = omp_get_wtime(); roctxRangePush("Jacobi Solve"); while (l2_norm > tol && iter < iter_max) { CUDA_RT_CALL(hipMemset(l2_norm_d, 0, 
sizeof(float))); // Compute grid points for this iteration hipLaunchKernelGGL(( jacobi_kernel), dim3(dim_grid), dim3(dim_block), 0, 0, a_new, a, l2_norm_d, iy_start, iy_end, nx); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipMemcpy(l2_norm_h, l2_norm_d, sizeof(float), hipMemcpyDeviceToHost)); // Apply periodic boundary conditions CUDA_RT_CALL(hipMemcpy(a_new, a_new + (iy_end - 1) * nx, nx * sizeof(float), hipMemcpyDeviceToDevice)); CUDA_RT_CALL(hipMemcpy(a_new + iy_end * nx, a_new + iy_start * nx, nx * sizeof(float), hipMemcpyDeviceToDevice)); CUDA_RT_CALL(hipDeviceSynchronize()); l2_norm = *l2_norm_h; l2_norm = std::sqrt(l2_norm); iter++; if ((iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm); std::swap(a_new, a); } roctxRangePop(); double stop = omp_get_wtime(); CUDA_RT_CALL(hipMemcpy(a_ref_h, a, nx * ny * sizeof(float), hipMemcpyDeviceToHost)); CUDA_RT_CALL(hipHostFree(l2_norm_h)); CUDA_RT_CALL(hipFree(l2_norm_d)); CUDA_RT_CALL(hipFree(a_new)); CUDA_RT_CALL(hipFree(a)); return (stop - start); }
e8e63672cc3186ca073991ddef13f23473f0efb6.cu
/* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <algorithm> #include <cmath> #include <cstdio> #include <iostream> #include <sstream> #include <omp.h> #include <nvToolsExt.h> #define BLOCK_DIM_X 32 #define BLOCK_DIM_Y 32 #define CUDA_RT_CALL(call) \ { \ cudaError_t cudaStatus = call; \ if (cudaSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \ } constexpr int MAX_NUM_DEVICES = 32; constexpr float tol = 1.0e-8; const float PI = 2.0 * std::asin(1.0); __global__ void initialize_boundaries(float* a_new, float* a, const float pi, const int offset, const int nx, const int my_ny, const int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) { const float y0 = sin(2.0 * pi * (offset + iy) / (ny - 1)); a[iy * nx + 0] = y0; a[iy * nx + (nx - 1)] = y0; a_new[iy * nx + 0] = y0; a_new[iy * nx + (nx - 1)] = y0; } } __global__ void jacobi_kernel(float* a_new, const float* a, float* l2_norm, const int iy_start, const int iy_end, const int nx) { int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start; int ix = blockIdx.x * blockDim.x + threadIdx.x + 1; __shared__ float block_l2_sum[BLOCK_DIM_X*BLOCK_DIM_Y]; unsigned thread_index = threadIdx.y*BLOCK_DIM_X + threadIdx.x; if (iy < iy_end && ix < (nx - 1)) { // Update grid point const float new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; float residue = new_val - a[iy * nx + ix]; // Set block-level L2 norm value for this grid point block_l2_sum[thread_index] = residue * residue; } else { block_l2_sum[thread_index] = 0; } // Reduce L2 norm for the block in parallel for (unsigned stride = 1; stride < BLOCK_DIM_X*BLOCK_DIM_Y; stride *= 2) { __syncthreads(); if ((thread_index) % (2*stride) == 0) { block_l2_sum[thread_index] += block_l2_sum[thread_index + stride]; } } // Atomically 
update global L2 norm with block-reduced L2 norm if (thread_index == 0) { atomicAdd(l2_norm, block_l2_sum[0]); } } int get_argval(char** begin, char** end, const std::string& arg, const int default_val) { int argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } int get_parsed_vals(char** begin, char **end, int* devices, const std::string& arg, const int default_val) { int numGPUs = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { numGPUs = 0; std::string dev_ids(*itr); int currpos = 0, nextpos = 0; do { nextpos = dev_ids.find_first_of(",", currpos); devices[numGPUs] = stoi(dev_ids.substr(currpos, nextpos)); numGPUs++; currpos = nextpos + 1; } while (nextpos != std::string::npos); } else { for (int i = 0; i < numGPUs; i++) { devices[i] = i; } } return numGPUs; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } double single_gpu(const int nx, const int ny, const int iter_max, float* const a_ref_h); int main(int argc, char* argv[]) { const int iter_max = get_argval(argv, argv + argc, "-niter", 1000); const int nx = get_argval(argv, argv + argc, "-nx", 16384); const int ny = get_argval(argv, argv + argc, "-ny", 16384); const bool p2p = get_arg(argv, argv + argc, "-p2p"); // Get GPU mapping from runtime arguments int available_devices = 0; CUDA_RT_CALL(cudaGetDeviceCount(&available_devices)); int devices[MAX_NUM_DEVICES]; int num_devices = get_parsed_vals(argv, argv + argc, devices, "-gpus", available_devices); float* a[MAX_NUM_DEVICES]; float* a_new[MAX_NUM_DEVICES]; float* a_ref_h; float* a_h; double runtime_serial = 0.0; float* l2_norm_d[MAX_NUM_DEVICES]; float* l2_norm_h[MAX_NUM_DEVICES]; int iy_start[MAX_NUM_DEVICES]; int iy_end[MAX_NUM_DEVICES]; int chunk_size[MAX_NUM_DEVICES]; // Compute chunk size and allocate 
memory on GPUs for (int dev_id = 0; dev_id < num_devices; ++dev_id) { CUDA_RT_CALL(cudaSetDevice(devices[dev_id])); CUDA_RT_CALL(cudaFree(0)); if (0 == dev_id) { // Allocate memory on host and record single-GPU timings CUDA_RT_CALL(cudaMallocHost(&a_ref_h, nx * ny * sizeof(float))); CUDA_RT_CALL(cudaMallocHost(&a_h, nx * ny * sizeof(float))); runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h); } // ny - 2 rows are distributed amongst `size` ranks in such a way // that each rank gets either (ny - 2) / size or (ny - 2) / size + 1 rows. // This optimizes load balancing when (ny - 2) % size != 0 int chunk_size_low = (ny - 2) / num_devices; int chunk_size_high = chunk_size_low + 1; // To calculate the number of ranks that need to compute an extra row, // the following formula is derived from this equation: // num_ranks_low * chunk_size_low + (size - num_ranks_low) * (chunk_size_low + 1) = (ny - 2) int num_ranks_low = num_devices * chunk_size_low + num_devices - (ny - 2); if (dev_id < num_ranks_low) chunk_size[dev_id] = chunk_size_low; else chunk_size[dev_id] = chunk_size_high; // Allocate memory on GPU CUDA_RT_CALL(cudaMalloc(a + dev_id, nx * (chunk_size[dev_id] + 2) * sizeof(float))); CUDA_RT_CALL(cudaMalloc(a_new + dev_id, nx * (chunk_size[dev_id] + 2) * sizeof(float))); CUDA_RT_CALL(cudaMemset(a[dev_id], 0, nx * (chunk_size[dev_id] + 2) * sizeof(float))); CUDA_RT_CALL(cudaMemset(a_new[dev_id], 0, nx * (chunk_size[dev_id] + 2) * sizeof(float))); // Calculate local domain boundaries int iy_start_global; // My start index in the global array if (dev_id < num_ranks_low) { iy_start_global = dev_id * chunk_size_low + 1; } else { iy_start_global = num_ranks_low * chunk_size_low + (dev_id - num_ranks_low) * chunk_size_high + 1; } iy_start[dev_id] = 1; iy_end[dev_id] = iy_start[dev_id] + chunk_size[dev_id]; // Set dirichlet boundary conditions on left and right boarder initialize_boundaries<<<(ny / num_devices) / 128 + 1, 128>>>( a[dev_id], a_new[dev_id], PI, 
iy_start_global - 1, nx, (chunk_size[dev_id] + 2), ny); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); CUDA_RT_CALL(cudaMalloc(l2_norm_d + dev_id, sizeof(float))); CUDA_RT_CALL(cudaMallocHost(l2_norm_h + dev_id, sizeof(float))); if (p2p == true) { const int top = dev_id > 0 ? dev_id - 1 : (num_devices - 1); int canAccessPeer = 0; // TODO: Part 2- Check whether GPU "devices[dev_id]" can access peer "devices[top]" CUDA_RT_CALL(cudaDeviceCanAccessPeer(&canAccessPeer, devices[dev_id], devices[top])); if (canAccessPeer) { // TODO: Part 2- Enable peer access from GPU "devices[dev_id]" to "devices[top]" CUDA_RT_CALL(cudaDeviceEnablePeerAccess(devices[top], 0)); } const int bottom = (dev_id + 1) % num_devices; if (top != bottom) { canAccessPeer = 0; // TODO: Part 2- Check and enable peer access from GPU "devices[dev_id]" to // "devices[bottom]", whenever possible CUDA_RT_CALL(cudaDeviceCanAccessPeer(&canAccessPeer, devices[dev_id], devices[bottom])); if (canAccessPeer) { CUDA_RT_CALL(cudaDeviceEnablePeerAccess(devices[bottom], 0)); } } } CUDA_RT_CALL(cudaDeviceSynchronize()); } // Share initial top and bottom local grid-point values between neighbours for (int dev_id = 0; dev_id < num_devices; ++dev_id) { CUDA_RT_CALL(cudaSetDevice(devices[dev_id])); const int top = dev_id > 0 ? 
dev_id - 1 : (num_devices - 1); const int bottom = (dev_id + 1) % num_devices; CUDA_RT_CALL(cudaMemcpy(a_new[top] + (iy_end[top] * nx), a_new[dev_id] + iy_start[dev_id] * nx, nx * sizeof(float), cudaMemcpyDeviceToDevice)); CUDA_RT_CALL(cudaMemcpy(a_new[bottom], a_new[dev_id] + (iy_end[dev_id] - 1) * nx, nx * sizeof(float), cudaMemcpyDeviceToDevice)); } for (int dev_id = 0; dev_id < num_devices; ++dev_id) { CUDA_RT_CALL(cudaSetDevice(devices[dev_id])); CUDA_RT_CALL(cudaDeviceSynchronize()); } printf("Jacobi relaxation: %d iterations on %d x %d mesh\n", iter_max, nx, ny); dim3 dim_block(BLOCK_DIM_X, BLOCK_DIM_Y, 1); int iter = 0; float l2_norm = 1.0; double start = omp_get_wtime(); nvtxRangePush("Jacobi solve"); while (l2_norm > tol && iter < iter_max) { // Launch device kernel on each GPU for (int dev_id = 0; dev_id < num_devices; ++dev_id) { // TODO: Part 1- Set current GPU to be "devices[dev_id]" CUDA_RT_CALL(cudaSetDevice(devices[dev_id])); CUDA_RT_CALL(cudaMemsetAsync(l2_norm_d[dev_id], 0, sizeof(float))); dim3 dim_grid((nx + BLOCK_DIM_X - 1) / BLOCK_DIM_X, (chunk_size[dev_id] + BLOCK_DIM_Y - 1) / BLOCK_DIM_Y, 1); // TODO: Part 1- Call Jacobi kernel with "dim_grid" blocks in grid and "dim_block" // blocks per thread. "dev_id" variable points to corresponding memory allocated // for the current GPU. jacobi_kernel<<<dim_grid, dim_block>>>( a_new[dev_id], a[dev_id], l2_norm_d[dev_id], iy_start[dev_id], iy_end[dev_id], nx); // TODO: Part 1- Copy GPU-local L2 norm "l2_norm_d" back to CPU "l2_norm_h" CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_h[dev_id], l2_norm_d[dev_id], sizeof(float), cudaMemcpyDeviceToHost)); } // Launch async memory copy operations for halo exchange and // for copying local-grid L2 norm from each GPU to host for (int dev_id = 0; dev_id < num_devices; ++dev_id) { const int top = dev_id > 0 ? 
dev_id - 1 : (num_devices - 1); const int bottom = (dev_id + 1) % num_devices; // TODO: Part 1- Set current GPU CUDA_RT_CALL(cudaSetDevice(devices[dev_id])); // TODO: Part 1- Implement halo exchange with top neighbour "top" CUDA_RT_CALL(cudaMemcpyAsync(a_new[top] + (iy_end[top] * nx), a_new[dev_id] + iy_start[dev_id] * nx, nx * sizeof(float), cudaMemcpyDeviceToDevice)); // TODO: Part 1- Implement halo exchange with bottom neighbour "bottom" CUDA_RT_CALL(cudaMemcpyAsync(a_new[bottom], a_new[dev_id] + (iy_end[dev_id] - 1) * nx, nx * sizeof(float), cudaMemcpyDeviceToDevice)); } l2_norm = 0.0; // Synchronize devices and compute global L2 norm for (int dev_id = 0; dev_id < num_devices; ++dev_id) { // TODO: part 1- Set current GPU and call cudaDeviceSynchronize() CUDA_RT_CALL(cudaSetDevice(devices[dev_id])); CUDA_RT_CALL(cudaDeviceSynchronize()); l2_norm += *(l2_norm_h[dev_id]); } l2_norm = std::sqrt(l2_norm); iter++; if ((iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm); for (int dev_id = 0; dev_id < num_devices; ++dev_id) { std::swap(a_new[dev_id], a[dev_id]); } } nvtxRangePop(); double stop = omp_get_wtime(); int offset = nx; // Copy computed grid back to host from each GPU for (int dev_id = 0; dev_id < num_devices; ++dev_id) { CUDA_RT_CALL( cudaMemcpy(a_h + offset, a[dev_id] + nx, std::min((nx * ny) - offset, nx * chunk_size[dev_id]) * sizeof(float), cudaMemcpyDeviceToHost)); offset += std::min(chunk_size[dev_id] * nx, (nx * ny) - offset); } // Compare against single GPU execution for correctness bool result_correct = true; for (int iy = 1; result_correct && (iy < (ny - 1)); ++iy) { for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) { if (std::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) { fprintf(stderr, "ERROR: a[%d * %d + %d] = %f does not match %f " "(reference)\n", iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]); result_correct = false; } } } if (result_correct) { printf("Num GPUs: %d. 
Using GPU ID: ", num_devices); for (int i = 0; i < num_devices; i++) { printf("%d, ", devices[i]); } printf( "\n%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, " "efficiency: %8.2f \n", ny, nx, runtime_serial, num_devices, (stop - start), runtime_serial / (stop - start), runtime_serial / (num_devices * (stop - start)) * 100); } for (int dev_id = (num_devices - 1); dev_id >= 0; --dev_id) { CUDA_RT_CALL(cudaSetDevice(dev_id)); CUDA_RT_CALL(cudaFreeHost(l2_norm_h[dev_id])); CUDA_RT_CALL(cudaFree(l2_norm_d[dev_id])); CUDA_RT_CALL(cudaFree(a_new[dev_id])); CUDA_RT_CALL(cudaFree(a[dev_id])); if (0 == dev_id) { CUDA_RT_CALL(cudaFreeHost(a_h)); CUDA_RT_CALL(cudaFreeHost(a_ref_h)); } } return result_correct ? 0 : 1; } double single_gpu(const int nx, const int ny, const int iter_max, float* const a_ref_h) { float* a; float* a_new; float* l2_norm_d; float* l2_norm_h; int iy_start = 1; int iy_end = (ny - 1); CUDA_RT_CALL(cudaMalloc(&a, nx * ny * sizeof(float))); CUDA_RT_CALL(cudaMalloc(&a_new, nx * ny * sizeof(float))); CUDA_RT_CALL(cudaMemset(a, 0, nx * ny * sizeof(float))); CUDA_RT_CALL(cudaMemset(a_new, 0, nx * ny * sizeof(float))); // Set diriclet boundary conditions on left and right boarder nvtxRangePush("Init boundaries"); initialize_boundaries<<<ny / 128 + 1, 128>>>(a, a_new, PI, 0, nx, ny, ny); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); nvtxRangePop(); CUDA_RT_CALL(cudaMalloc(&l2_norm_d, sizeof(float))); CUDA_RT_CALL(cudaMallocHost(&l2_norm_h, sizeof(float))); CUDA_RT_CALL(cudaDeviceSynchronize()); printf("Single GPU jacobi relaxation: %d iterations on %d x %d mesh\n", iter_max, nx, ny); dim3 dim_grid((nx + BLOCK_DIM_X - 1) / BLOCK_DIM_X, (ny + BLOCK_DIM_Y - 1) / BLOCK_DIM_Y, 1); dim3 dim_block(BLOCK_DIM_X, BLOCK_DIM_Y, 1); int iter = 0; float l2_norm = 1.0; double start = omp_get_wtime(); nvtxRangePush("Jacobi Solve"); while (l2_norm > tol && iter < iter_max) { CUDA_RT_CALL(cudaMemset(l2_norm_d, 0, sizeof(float))); // Compute 
grid points for this iteration jacobi_kernel<<<dim_grid, dim_block>>>(a_new, a, l2_norm_d, iy_start, iy_end, nx); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaMemcpy(l2_norm_h, l2_norm_d, sizeof(float), cudaMemcpyDeviceToHost)); // Apply periodic boundary conditions CUDA_RT_CALL(cudaMemcpy(a_new, a_new + (iy_end - 1) * nx, nx * sizeof(float), cudaMemcpyDeviceToDevice)); CUDA_RT_CALL(cudaMemcpy(a_new + iy_end * nx, a_new + iy_start * nx, nx * sizeof(float), cudaMemcpyDeviceToDevice)); CUDA_RT_CALL(cudaDeviceSynchronize()); l2_norm = *l2_norm_h; l2_norm = std::sqrt(l2_norm); iter++; if ((iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm); std::swap(a_new, a); } nvtxRangePop(); double stop = omp_get_wtime(); CUDA_RT_CALL(cudaMemcpy(a_ref_h, a, nx * ny * sizeof(float), cudaMemcpyDeviceToHost)); CUDA_RT_CALL(cudaFreeHost(l2_norm_h)); CUDA_RT_CALL(cudaFree(l2_norm_d)); CUDA_RT_CALL(cudaFree(a_new)); CUDA_RT_CALL(cudaFree(a)); return (stop - start); }
cpu.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include "cpu.h"

namespace StreamCompaction {
namespace CPU {

/**
 * CPU scan (exclusive prefix sum): odata[i] = sum of idata[0..i-1].
 * HIP events are used purely as a wall-clock timer around the host loop;
 * the elapsed time is printed to stdout.
 *
 * @param n      number of elements (no-op scan for n <= 0)
 * @param odata  output array, length n
 * @param idata  input array, length n
 */
void scan(int n, int *odata, const int *idata) {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    float milliseconds = 0;
    hipEventRecord(start);
    // Fix: guard against n <= 0; the original wrote odata[0] unconditionally,
    // which is out-of-bounds for an empty array.
    if (n > 0) {
        odata[0] = 0; // exclusive scan: identity in the first slot
        for (int i = 1; i < n; i++) {
            odata[i] = odata[i - 1] + idata[i - 1];
        }
    }
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    std::cout << "cpu method: " << milliseconds << "ms" << std::endl;
    // Fix: release the timing events (the original leaked them on every call).
    hipEventDestroy(start);
    hipEventDestroy(stop);
}

/**
 * CPU stream compaction without using the scan function: copies the
 * non-zero elements of idata into odata, preserving order.
 *
 * @returns the number of elements remaining after compaction.
 */
int compactWithoutScan(int n, int *odata, const int *idata) {
    int cur_index = 0;
    for (int i = 0; i < n; i++) {
        if (idata[i] != 0) {
            odata[cur_index++] = idata[i];
        }
    }
    return cur_index;
}

/**
 * CPU stream compaction using scan and scatter, mirroring the parallel
 * algorithm: build a 0/1 keep-map, exclusive-scan it to get destination
 * indices, then scatter the kept elements.
 *
 * @returns the number of elements remaining after compaction.
 */
int compactWithScan(int n, int *odata, const int *idata) {
    int* idata_map = new int[n];
    int* scan_sum = new int[n];
    for (int i = 0; i < n; i++) {
        idata_map[i] = (idata[i] == 0) ? 0 : 1;
    }
    scan(n, scan_sum, idata_map);
    int num_remain = scatter(n, odata, scan_sum, idata_map, idata);
    // Fix: the original leaked both scratch arrays on every call.
    delete[] idata_map;
    delete[] scan_sum;
    return num_remain;
}

/**
 * Scatter step: for every flagged element, write idata[i] to the slot
 * given by the scanned index array.
 *
 * @returns the number of elements written.
 */
int scatter(int n, int *odata, const int *scan_sum, const int *idata_map, const int *idata) {
    int cur_num = 0;
    for (int i = 0; i < n; i++) {
        if (idata_map[i] == 1) {
            odata[scan_sum[i]] = idata[i];
            cur_num++;
        }
    }
    return cur_num;
}

}
}
cpu.cu
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include "cpu.h"

namespace StreamCompaction {
namespace CPU {

/**
 * CPU scan (exclusive prefix sum): odata[i] = sum of idata[0..i-1].
 * CUDA events are used purely as a wall-clock timer around the host loop;
 * the elapsed time is printed to stdout.
 *
 * @param n      number of elements (no-op scan for n <= 0)
 * @param odata  output array, length n
 * @param idata  input array, length n
 */
void scan(int n, int *odata, const int *idata) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    cudaEventRecord(start);
    // Fix: guard against n <= 0; the original wrote odata[0] unconditionally,
    // which is out-of-bounds for an empty array.
    if (n > 0) {
        odata[0] = 0; // exclusive scan: identity in the first slot
        for (int i = 1; i < n; i++) {
            odata[i] = odata[i - 1] + idata[i - 1];
        }
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << "cpu method: " << milliseconds << "ms" << std::endl;
    // Fix: release the timing events (the original leaked them on every call).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}

/**
 * CPU stream compaction without using the scan function: copies the
 * non-zero elements of idata into odata, preserving order.
 *
 * @returns the number of elements remaining after compaction.
 */
int compactWithoutScan(int n, int *odata, const int *idata) {
    int cur_index = 0;
    for (int i = 0; i < n; i++) {
        if (idata[i] != 0) {
            odata[cur_index++] = idata[i];
        }
    }
    return cur_index;
}

/**
 * CPU stream compaction using scan and scatter, mirroring the parallel
 * algorithm: build a 0/1 keep-map, exclusive-scan it to get destination
 * indices, then scatter the kept elements.
 *
 * @returns the number of elements remaining after compaction.
 */
int compactWithScan(int n, int *odata, const int *idata) {
    int* idata_map = new int[n];
    int* scan_sum = new int[n];
    for (int i = 0; i < n; i++) {
        idata_map[i] = (idata[i] == 0) ? 0 : 1;
    }
    scan(n, scan_sum, idata_map);
    int num_remain = scatter(n, odata, scan_sum, idata_map, idata);
    // Fix: the original leaked both scratch arrays on every call.
    delete[] idata_map;
    delete[] scan_sum;
    return num_remain;
}

/**
 * Scatter step: for every flagged element, write idata[i] to the slot
 * given by the scanned index array.
 *
 * @returns the number of elements written.
 */
int scatter(int n, int *odata, const int *scan_sum, const int *idata_map, const int *idata) {
    int cur_num = 0;
    for (int i = 0; i < n; i++) {
        if (idata_map[i] == 1) {
            odata[scan_sum[i]] = idata[i];
            cur_num++;
        }
    }
    return cur_num;
}

}
}
76c035cf5cd0649c011c03674517ed85284c6781.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <fstream>

const int width = 1920;
const int height = 1080;

// Minimal complex number usable from device code (real part r, imaginary part i).
struct hipComplex {
    float r;
    float i;
    __device__ hipComplex(float a, float b) : r(a), i(b) {}
    // Squared magnitude |z|^2 (avoids a sqrt for the divergence test).
    __device__ float magnitude2() { return r*r + i*i; }
    __device__ hipComplex operator*(const hipComplex &a) {
        return hipComplex(r*a.r - i*a.i, r*a.i + i*a.r);
    }
    __device__ hipComplex operator+(const hipComplex &a) {
        return hipComplex(r + a.r, i + a.i);
    }
};

// Returns 1 if pixel (x, y) stays bounded under z <- z^2 + c for 200
// iterations (in the Julia set for c = -0.8 + 0.156i), else 0.
__device__ int julia(int x, int y) {
    const float scale = 1.5;
    float jx = scale * (float)(width / 2 - x) / (width / 2);
    float jy = scale * (float)(height / 2 - y) / (height / 2);
    hipComplex c(-0.8, 0.156);
    hipComplex a(jx, jy);
    int i = 0;
    for (i = 0; i < 200; i++) {
        a = a * a + c;
        if (a.magnitude2() > 1000)
            return 0;
    }
    return 1;
}

// One block per pixel (grid = width x height, 1 thread per block); writes
// an RGBA quad: red channel 255 for in-set pixels, alpha always 255.
__global__ void kernel(unsigned char *ptr) {
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y * gridDim.x;
    int juliaValue = julia(x, y);
    ptr[offset * 4 + 0] = 255 * juliaValue;
    ptr[offset * 4 + 1] = 0;
    ptr[offset * 4 + 2] = 0;
    ptr[offset * 4 + 3] = 255;
}

int main() {
    const int bitmapSize = 4 * width * height;
    unsigned char *hst_bitmap = (unsigned char *)malloc(bitmapSize);
    // Fix: the original never checked the host allocation.
    if (hst_bitmap == NULL)
        return 1;
    unsigned char *dev_bitmap;
    // Fix: check the device allocation and bail out cleanly on failure.
    if (hipMalloc((void**)&dev_bitmap, bitmapSize) != hipSuccess) {
        free(hst_bitmap);
        return 1;
    }
    dim3 grid(width, height);
    kernel << <grid, 1 >> > (dev_bitmap);
    hipMemcpy(hst_bitmap, dev_bitmap, bitmapSize, hipMemcpyDeviceToHost);
    // Dump the red channel as a tab-separated text matrix (column-major walk).
    auto ofs = std::ofstream("bm.txt");
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < height; j++)
            ofs << (int)hst_bitmap[4*(j*width + i)] << "\t";
        ofs << std::endl;
    }
    ofs.close();
    // Fix: the original leaked both the device and the host bitmap.
    hipFree(dev_bitmap);
    free(hst_bitmap);
    return 0;
}
76c035cf5cd0649c011c03674517ed85284c6781.cu
#include <cstdlib>
#include <fstream>

const int width = 1920;
const int height = 1080;

// Minimal complex number usable from device code (real part r, imaginary part i).
struct cuComplex {
    float r;
    float i;
    __device__ cuComplex(float a, float b) : r(a), i(b) {}
    // Squared magnitude |z|^2 (avoids a sqrt for the divergence test).
    __device__ float magnitude2() { return r*r + i*i; }
    __device__ cuComplex operator*(const cuComplex &a) {
        return cuComplex(r*a.r - i*a.i, r*a.i + i*a.r);
    }
    __device__ cuComplex operator+(const cuComplex &a) {
        return cuComplex(r + a.r, i + a.i);
    }
};

// Returns 1 if pixel (x, y) stays bounded under z <- z^2 + c for 200
// iterations (in the Julia set for c = -0.8 + 0.156i), else 0.
__device__ int julia(int x, int y) {
    const float scale = 1.5;
    float jx = scale * (float)(width / 2 - x) / (width / 2);
    float jy = scale * (float)(height / 2 - y) / (height / 2);
    cuComplex c(-0.8, 0.156);
    cuComplex a(jx, jy);
    int i = 0;
    for (i = 0; i < 200; i++) {
        a = a * a + c;
        if (a.magnitude2() > 1000)
            return 0;
    }
    return 1;
}

// One block per pixel (grid = width x height, 1 thread per block); writes
// an RGBA quad: red channel 255 for in-set pixels, alpha always 255.
__global__ void kernel(unsigned char *ptr) {
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y * gridDim.x;
    int juliaValue = julia(x, y);
    ptr[offset * 4 + 0] = 255 * juliaValue;
    ptr[offset * 4 + 1] = 0;
    ptr[offset * 4 + 2] = 0;
    ptr[offset * 4 + 3] = 255;
}

int main() {
    const int bitmapSize = 4 * width * height;
    unsigned char *hst_bitmap = (unsigned char *)malloc(bitmapSize);
    // Fix: the original never checked the host allocation.
    if (hst_bitmap == NULL)
        return 1;
    unsigned char *dev_bitmap;
    // Fix: check the device allocation and bail out cleanly on failure.
    if (cudaMalloc((void**)&dev_bitmap, bitmapSize) != cudaSuccess) {
        free(hst_bitmap);
        return 1;
    }
    dim3 grid(width, height);
    kernel << <grid, 1 >> > (dev_bitmap);
    cudaMemcpy(hst_bitmap, dev_bitmap, bitmapSize, cudaMemcpyDeviceToHost);
    // Dump the red channel as a tab-separated text matrix (column-major walk).
    auto ofs = std::ofstream("bm.txt");
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < height; j++)
            ofs << (int)hst_bitmap[4*(j*width + i)] << "\t";
        ofs << std::endl;
    }
    ofs.close();
    // Fix: the original leaked both the device and the host bitmap.
    cudaFree(dev_bitmap);
    free(hst_bitmap);
    return 0;
}
168b752e8e39bf5786a2a44912444d75900008d8.hip
// !!! This is a file automatically generated by hipify!!!
/*
 *  Copyright 2011-2014 Maxim Milakov
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include "sparse_fully_connected_1x1_layer_tester_cuda.h"

#include <hip/hip_runtime.h>

#include "util_cuda.h"
#include "neural_network_cusparse_exception.h"
#include "../sparse_convolution_layer.h"

namespace nnforge
{
	namespace cuda
	{
		// Seeds the output buffer with the per-neuron bias.  Each thread
		// handles one output neuron for up to 4 consecutive entries
		// (y dimension covers ceil(entry_count / 4) entry groups).
		__global__ void copy_bias_sparse_kernel(
			const float * __restrict biases,
			float * __restrict output,
			int output_neuron_count,
			int entry_count)
		{
			int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
			int entry_id = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
			if ((output_neuron_id < output_neuron_count))
			{
				float bias = biases[output_neuron_id];
				float * current_output = output + (int)(entry_id * output_neuron_count + output_neuron_id);
				#pragma unroll
				for(int i = 0; i < 4; ++i)
				{
					if (entry_id < entry_count)
						*current_output = bias;
					current_output += output_neuron_count;
					entry_id++;
				}
			}
		}

		sparse_fully_connected_1x1_layer_tester_cuda::sparse_fully_connected_1x1_layer_tester_cuda()
		{
		}

		sparse_fully_connected_1x1_layer_tester_cuda::~sparse_fully_connected_1x1_layer_tester_cuda()
		{
		}

		// Runs the layer forward pass on the given stream: first fills the
		// output buffer with biases, then accumulates the sparse CSR
		// weight-matrix product via hipSPARSE (beta = 1 keeps the biases).
		void sparse_fully_connected_1x1_layer_tester_cuda::enqueue_test(
			hipStream_t stream_id,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
			cuda_linear_buffer_device_smart_ptr input_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
			unsigned int entry_count)
		{
			std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
				*cuda_config,
				output_elem_count_per_entry,
				(entry_count + 4 - 1) / 4,
				1);
			hipLaunchKernelGGL(( copy_bias_sparse_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
				*data[1],
				*additional_buffers[0],
				output_elem_count_per_entry,
				entry_count);

			cusparse_safe_call(hipsparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
			float alpha = 1.0F;
			float beta = 1.0F;
			hipsparseMatDescr_t mat_descr;
			cusparse_safe_call(hipsparseCreateMatDescr(&mat_descr));
			cusparse_safe_call(hipsparseScsrmm(
				cuda_config->get_cusparse_handle(),
				HIPSPARSE_OPERATION_NON_TRANSPOSE,
				output_elem_count_per_entry,
				entry_count,
				input_elem_count_per_entry,
				feature_map_connection_count,
				&alpha,
				mat_descr,
				*data[0],
				*data_custom[1],
				*data_custom[0],
				*input_buffer,
				input_elem_count_per_entry,
				&beta,
				*additional_buffers[0],
				output_elem_count_per_entry));
			// Fix: the descriptor was created on every call but never
			// released, leaking one hipSPARSE matrix descriptor per call.
			cusparse_safe_call(hipsparseDestroyMatDescr(mat_descr));
		}

		// One scratch buffer per entry, sized to hold the layer output.
		std::vector<size_t> sparse_fully_connected_1x1_layer_tester_cuda::get_sizes_of_additional_buffers_per_entry() const
		{
			std::vector<size_t> res;
			res.push_back(output_elem_count_per_entry * sizeof(float));
			return res;
		}

		// The layer writes its result into the first additional buffer.
		cuda_linear_buffer_device_smart_ptr sparse_fully_connected_1x1_layer_tester_cuda::get_output_buffer(
			cuda_linear_buffer_device_smart_ptr input_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
		{
			return additional_buffers[0];
		}

		// Caches the connection count from the schema once the tester is bound.
		void sparse_fully_connected_1x1_layer_tester_cuda::tester_configured()
		{
			nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);

			feature_map_connection_count = layer_derived->feature_map_connection_count;
		}
	}
}
168b752e8e39bf5786a2a44912444d75900008d8.cu
/*
 *  Copyright 2011-2014 Maxim Milakov
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include "sparse_fully_connected_1x1_layer_tester_cuda.h"

#include <cuda_runtime.h>

#include "util_cuda.h"
#include "neural_network_cusparse_exception.h"
#include "../sparse_convolution_layer.h"

namespace nnforge
{
	namespace cuda
	{
		// Seeds the output buffer with the per-neuron bias.  Each thread
		// handles one output neuron for up to 4 consecutive entries
		// (y dimension covers ceil(entry_count / 4) entry groups).
		__global__ void copy_bias_sparse_kernel(
			const float * __restrict biases,
			float * __restrict output,
			int output_neuron_count,
			int entry_count)
		{
			int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
			int entry_id = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
			if ((output_neuron_id < output_neuron_count))
			{
				float bias = biases[output_neuron_id];
				float * current_output = output + (int)(entry_id * output_neuron_count + output_neuron_id);
				#pragma unroll
				for(int i = 0; i < 4; ++i)
				{
					if (entry_id < entry_count)
						*current_output = bias;
					current_output += output_neuron_count;
					entry_id++;
				}
			}
		}

		sparse_fully_connected_1x1_layer_tester_cuda::sparse_fully_connected_1x1_layer_tester_cuda()
		{
		}

		sparse_fully_connected_1x1_layer_tester_cuda::~sparse_fully_connected_1x1_layer_tester_cuda()
		{
		}

		// Runs the layer forward pass on the given stream: first fills the
		// output buffer with biases, then accumulates the sparse CSR
		// weight-matrix product via cuSPARSE (beta = 1 keeps the biases).
		void sparse_fully_connected_1x1_layer_tester_cuda::enqueue_test(
			cudaStream_t stream_id,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
			cuda_linear_buffer_device_smart_ptr input_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
			unsigned int entry_count)
		{
			std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
				*cuda_config,
				output_elem_count_per_entry,
				(entry_count + 4 - 1) / 4,
				1);
			copy_bias_sparse_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
				*data[1],
				*additional_buffers[0],
				output_elem_count_per_entry,
				entry_count);

			cusparse_safe_call(cusparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
			float alpha = 1.0F;
			float beta = 1.0F;
			cusparseMatDescr_t mat_descr;
			cusparse_safe_call(cusparseCreateMatDescr(&mat_descr));
			cusparse_safe_call(cusparseScsrmm(
				cuda_config->get_cusparse_handle(),
				CUSPARSE_OPERATION_NON_TRANSPOSE,
				output_elem_count_per_entry,
				entry_count,
				input_elem_count_per_entry,
				feature_map_connection_count,
				&alpha,
				mat_descr,
				*data[0],
				*data_custom[1],
				*data_custom[0],
				*input_buffer,
				input_elem_count_per_entry,
				&beta,
				*additional_buffers[0],
				output_elem_count_per_entry));
			// Fix: the descriptor was created on every call but never
			// released, leaking one cuSPARSE matrix descriptor per call.
			cusparse_safe_call(cusparseDestroyMatDescr(mat_descr));
		}

		// One scratch buffer per entry, sized to hold the layer output.
		std::vector<size_t> sparse_fully_connected_1x1_layer_tester_cuda::get_sizes_of_additional_buffers_per_entry() const
		{
			std::vector<size_t> res;
			res.push_back(output_elem_count_per_entry * sizeof(float));
			return res;
		}

		// The layer writes its result into the first additional buffer.
		cuda_linear_buffer_device_smart_ptr sparse_fully_connected_1x1_layer_tester_cuda::get_output_buffer(
			cuda_linear_buffer_device_smart_ptr input_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
		{
			return additional_buffers[0];
		}

		// Caches the connection count from the schema once the tester is bound.
		void sparse_fully_connected_1x1_layer_tester_cuda::tester_configured()
		{
			nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);

			feature_map_connection_count = layer_derived->feature_map_connection_count;
		}
	}
}
b6baf8f275e2f67c3e8b3c0887365a094ad5b300.hip
// !!! This is a file automatically generated by hipify!!!
// Interval branch-and-bound maximisation of f(x, y) = (1 - x^2) * cos(5y)
// over gaol intervals, with a GPU helper (launched from a std::thread) that
// re-prioritises batches of candidate boxes.
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <gaol/gaol.h>
#include "helper_cuda.h"
#include "cuda_interval_lib.h"
#include <limits>
#include <vector>
#include <queue>
#include <deque>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include "omp.h"
#include <thread>
#define NUM_THREADS 4
#define intervalEpsilon 0.001
#define outputEpsilon 0.001
#define K 10 // Sampling number
#define DIM 2 // Dimension
#define NEW_INTV_THRESHOLD 8
#define CPU_THRESHOLD 12
#define USE_GPU 1
using namespace std ;
// NOTE(review): written by gpuHandlerThread (worker std::thread) and polled
// by main with no synchronisation — presumably intended as a done-flag;
// confirm whether this should be std::atomic<int>.
int syncFlag=0 ;
__host__ __device__ float returnMax( float a, float b) { if(a > b) return a ; else return b ; }
__host__ __device__ float returnMin( float a, float b) { if(a < b) return a ; else return b ; }
//--- Returns the dimension which has the largest width
int IntervalWidth( vector<gaol::interval> X )
{
 if(X.size()==0) { printf("Error: Interval list empty!!!" ) ; exit(-1) ; }
 float Width = X[0].width();
 int index = 0 ;
 for(int i=0; i<X.size(); i++) {
 if(X[i].width() > Width) { Width = X[i].width() ; index = i ; }
 }
 return index ;
}
// Thin POD view over a thrust::device_vector (raw pointer + element count)
// so it can be passed by value into a kernel.
template <typename T> struct KernelArray { T* _array ; int _size ; } ;
template <typename T> KernelArray<T> convertToKernel(thrust::device_vector<T>& dvec)
{
 KernelArray<T> kArray ;
 kArray._array = thrust::raw_pointer_cast(&dvec[0]);
 kArray._size = (int) dvec.size();
 return kArray ;
} ;
//From script
// Interval (inclusion) form of the objective, evaluated on the host.
gaol::interval cpu_Inclusion_Func_expr ( gaol::interval x, gaol::interval y) { gaol::interval z ; z = (1 - pow(x,2))*cos(5*y) ; return z ; }
//End script
//From script
// Point evaluation of the objective on the host.
float cpu_Func_expr ( float x, float y) { float z = (1 - pow(x,2))*cos(5*y) ; return z ; }
//End script
//From script
__device__ float gpu_Func_expr ( float x, float y ) { float z ; z = (1 - pow(x,2))*cos(5*y) ; return z ; }
//Endscript
//From script
// Same objective, taking the sample point as an array indexed by dimension.
__device__ float gpu_func_array_expr ( float* var ) { float z ; z = (1 - pow(var[0],2))*cos(5*var[1]) ; return z ; }
//End script
// One block per candidate box (DIM consecutive intervals in gpuMainQue);
// blockDim = (DIM, K).  Samples K midpoints per dimension, evaluates the
// objective, max-reduces in shared memory, and writes the resulting
// priority back into gpuPriQue for the host to sort with thrust.
__global__ void gpuKernel ( KernelArray<interval_gpu<double>> gpuMainQue, KernelArray<double> gpuPriQue, int dimension )
{
 //printf("Testing ................\n");
 __shared__ interval_gpu<double> SharedIntervalList[DIM] ; // Warning thrown due to blank constructor
 __shared__ double RandSampleList[DIM] ;
 __shared__ double SharedIntDimSample[DIM][K];
 float localSampleList[DIM] ;
 int tix = threadIdx.x ;
 int tiy = threadIdx.y ;
 float chunkSize = 0.0 ;
 // __syncthreads();
 if(tiy==0) {
 // Stage this block's box into shared memory; midpoint as default sample.
 SharedIntervalList[tix] = gpuMainQue._array[blockIdx.x*DIM + tix] ;
 // printf("Copied Interval = %f from thread= %d\n", SharedIntervalList[tix].lower(), tix);
 RandSampleList[tix] = (SharedIntervalList[tix].lower() + SharedIntervalList[tix].upper())/2 ;
 }
 __syncthreads();
 chunkSize = (SharedIntervalList[tix].upper() - SharedIntervalList[tix].lower())/K ;
 SharedIntDimSample[tix][tiy] = SharedIntervalList[tix].lower() +tiy*chunkSize + chunkSize/2 ; //-- Midpoint
 __syncthreads() ;
 for(int m=0; m<DIM; m++) localSampleList[m] = RandSampleList[m] ;
 // localSampleList = RandSampleList;
 localSampleList[tix] = SharedIntDimSample[tix][tiy] ; // update the random sample for that thread
 SharedIntDimSample[tix][tiy] = gpu_func_array_expr(localSampleList) ;
 __syncthreads();
 //----- Max reduce for the values per tix and load in RandSampleList(reuse the allocated memory- size of dimension)
 //----- Do a Max reduce ---
 int size = K ;
 for(int i=ceil((float)K/2) ; i>1; i = ceil((float)i/2)) {
 if(tiy < i && tiy+i<size-1) SharedIntDimSample[tix][tiy] = returnMax(SharedIntDimSample[tix][tiy] , SharedIntDimSample[tix][tiy+i]) ;
 size = i ;
 // SharedIntDimSample[tix][tiy] += SharedIntDimSample[tix][tiy + i] ;
 __syncthreads() ;
 }
 if(K > 1 && tiy==0) SharedIntDimSample[tix][0] = returnMax(SharedIntDimSample[tix][0], SharedIntDimSample[tix][1]) ;
 __syncthreads() ;
 //----- Max vaue across the dimensions ----
 // NOTE(review): size is reset to 0 here, so the guard tix+i<size-1 in the
 // loop below can never be true on the first pass — confirm intent; for
 // DIM == 2 the loop body is skipped and the tail below does the work.
 size = 0;
 for(int i=ceil((float)DIM/2) ; i > 1; i = ceil((float)i/2)) {
 if(tix < i && tix+i<size-1) SharedIntDimSample[tix][0] = returnMax(SharedIntDimSample[tix][0], SharedIntDimSample[tix+i][0]) ;
 size = i ;
 __syncthreads() ;
 }
 //if(tix==0 && tiy==0) {
 // if(DIM > 1) SharedIntDimSample[0][0] = returnMax(SharedIntDimSample[0][0], SharedIntDimSample[1][0]) ; //--copy this priority value to the global memory
 //}
 if(tiy==0)
 if(DIM > 1)gpuPriQue._array[blockIdx.x*DIM + tix] = returnMax(SharedIntDimSample[0][0], SharedIntDimSample[1][0]);
 else gpuPriQue._array[blockIdx.x*DIM + tix] = SharedIntDimSample[0][0] ;
 // gpuPriQue._array[blockIdx.x*DIM + tix] = SharedIntDimSample[0][0] ;
 //----- The RandSampleList becomes the priority label array of SharedIntervalList -----
 //----- Sort these and terminate
 //---- Sorting done with a thrust call at the host(execution will still be on device)
 __syncthreads();
}
//ManageThreads.push_back(thread(gpuHandlerThread, gpuMainQue, gpuPriQue, dimension)) ; // Trigger the gpu thread
// Host worker (runs on a std::thread): launches gpuKernel over the batch,
// waits for the device, then raises syncFlag so main can join and sort.
void gpuHandlerThread ( KernelArray<interval_gpu<double>> gpuMainQue, KernelArray<double> gpuPriQue, int dimension)
{
 // printf("Debug: 7: Got called...\n");
 if(K*dimension > 512) { cout << "Reduce the K value" << endl ; }
 else {
 //dim3 dimBlock(K, dimension);
 dim3 dimBlock(dimension, K);
 dim3 dimGrid(gpuPriQue._size);
 hipLaunchKernelGGL(( gpuKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, gpuMainQue, gpuPriQue, dimension) ;
 hipDeviceSynchronize();
 //thrust::device_vector<float> a_temp = *gpuPriQue._array ;
 // thrust::sort(gpuPriQue._array[0], gpuPriQue._array.end[4]) ;
 }
 syncFlag = 1 ;
}
//From script
int dimension = 2 ; // From scipt - defined by the problem
gaol::interval x_0(0,5), x_1(-7,5) ; // From script - defined by the problem
//End script
// Branch-and-bound driver: queues hold candidate boxes flattened as
// `dimension` consecutive intervals, with a matching priority per slot.
int main()
{
 gaol::init();
 omp_set_num_threads(NUM_THREADS);
 //--- Data structure for the gpu ---
 thrust::device_vector<interval_gpu<double>> gpu_interval_list ;
 thrust::device_vector<double> gpu_interval_priority ;
 thrust::device_vector<float> fbestTag ;
 KernelArray<interval_gpu<double>> gpuMainQue ;
 KernelArray<double> gpuPriQue ;
 //--- Data structure for the cpu ---
 float fbest = numeric_limits<float>::min();
 vector<gaol::interval> bestbbInt(dimension) ;
 deque<gaol::interval> MainQue ;
 deque<gaol::interval> TempQue ;
 deque<float> MainQue_priority ;
 deque<float> TempQue_priority ;
 deque<thread> ManageThreads ;
 vector<gaol::interval> MidPoints(dimension) ;
 int addedIntervalSize = 0; // Holds information for threshold of gpu call
 int count=0;
 int gpuSize=0;
 //From script
 TempQue.push_back(x_0) ; // From script - initalise queue with starting intervals
 TempQue.push_back(x_1) ;
 //End script
 vector<gaol::interval> X(dimension); // Intervals to be used inside the while loop
 vector<gaol::interval> X1(dimension); // Intervals to be used inside the while loop
 vector<gaol::interval> X2(dimension); // Intervals to be used inside the while loop
 vector<vector<gaol::interval>> Xi ;
 gaol::interval FunctionBound ;
 //---- Initialise fbestTag --------
 fbestTag.push_back(numeric_limits<float>::min()) ;
 //---- Get the priority of the starting interval ----
 //From script
 gaol::interval PriTemp = cpu_Inclusion_Func_expr( (x_0.left() + x_0.right())/2 , (x_1.left() + x_1.right())/2 ) ;
 //End script
 for(int i=0; i<dimension; i++) TempQue_priority.push_back(PriTemp.left()) ;
 int loop_Counter = 0 ;
 //-- ManageThreads commented until gpu comes alive
 while( (int)TempQue_priority.size()> 0 || (int)MainQue_priority.size()>0 || (int)ManageThreads.size()>0) {
 // printf("Start: MainQue_priority = %d \n", (int)MainQue_priority.size());
 // printf("Start: TempQue_priority = %d \n", (int)TempQue_priority.size());
 // printf("Start: ManageThreads = %d \n", (int)ManageThreads.size());
 // printf("Debug: 6: Reached here: Temp_Queue_Size = %lu, Main_Queue_size = %lu\n", TempQue.size(), MainQue.size());
 // cout << " Idiot!!... Terminate while :)...." << endl ;
 // Join a finished GPU batch: sort the re-prioritised boxes on the device
 // and push them (front) back into the host queue.
 if(USE_GPU==1 && syncFlag == 1 && ManageThreads.size() != 0 && count>=CPU_THRESHOLD) { // gpu calls to be joined
 if(ManageThreads.front().joinable()) ManageThreads.front().join() ;
 loop_Counter++ ;
 // if(loop_Counter==1) {
 // cout << "After the re-written priority from gpu " << endl ;
 // for(int i=0; i<(int)gpu_interval_priority.size(); i++) {
 // interval_gpu<double> temp = gpu_interval_list[i] ;
 // cout << temp.lower() << " : " << temp.upper() << " -- " << gpu_interval_priority[i] << endl ;
 // }
 //cout << "GPU_QUEUE_SIZE = " << (int)gpu_interval_priority.size() << endl ;
 cout << "GPU_QUEUE_SIZE RECEIVED= " << gpuPriQue._size << endl ;
 // }
 thrust::stable_sort_by_key(gpu_interval_priority.begin(), gpu_interval_priority.end(), gpu_interval_list.begin()) ;
 // cout << "After Sorting " << endl ;
 // for(int i=0; i<(int)gpu_interval_priority.size(); i++) {
 // interval_gpu<double> temp = gpu_interval_list[i] ;
 // cout << temp.lower() << " : " << temp.upper() << " -- " << gpu_interval_priority[i] << endl ;
 // }
 syncFlag = 0 ;
 count = 0 ;
 ManageThreads.pop_front() ;
 //MainQue.clear() ;
 for(int i=0; i < (int)gpu_interval_list.size()/dimension; i++) { // translate gpu return list to gaol
 for(int j=dimension-1; j>=0; j--) {
 interval_gpu<double> ij_gpu = gpu_interval_list[i*dimension + j] ;
 gaol::interval ij(ij_gpu.lower(), ij_gpu.upper());
 MainQue.push_front(ij) ;
 MainQue_priority.push_front(gpu_interval_priority[i*dimension + j]) ;
 }
 //MainQue_priority.push_front(gpu_interval_priority[i]) ;
 }
 gpu_interval_priority.clear();
 gpu_interval_list.clear();
 }
 // Flush newly bisected boxes from TempQue into MainQue.
 if((int)TempQue.size() != 0) {
 // cout << " TempQue-Size = " << TempQue_priority.size() << endl ;
 for(int i=0; i<(int)TempQue.size()/dimension; i++) { // push the TempQue to the MainQue
 for(int j=0; j < dimension; j++) {
 MainQue.push_back(TempQue[i*dimension+j]) ;
 MainQue_priority.push_back(TempQue_priority[i*dimension+j]) ;
 }
 //MainQue_priority.push_back(TempQue_priority[i]) ;
 //printf("Update-2: MainQue_priority = %d \n", (int)MainQue_priority.size());
 }
 TempQue_priority.clear();
 TempQue.clear();
 }
 // Offload the tail of MainQue (beyond CPU_THRESHOLD boxes) to the GPU
 // when enough new boxes have accumulated and no GPU batch is in flight.
 if(USE_GPU==1 && (int)MainQue_priority.size()/dimension - CPU_THRESHOLD > NEW_INTV_THRESHOLD && ManageThreads.size()==0 ) { // minimum number of new intervals to trigger gpu
 // gpu_interval_list.clear();
 // gpu_interval_priority.clear();
 cout << "MainQueSize = " << (int)MainQue.size() << endl ;
 for(int i=CPU_THRESHOLD; i<(int)MainQue.size()/dimension; i++) {
 for(int j=0; j<dimension; j++) {
 interval_gpu<double> ij(MainQue[i*dimension+j].left(), MainQue[i*dimension+j].right()) ;
 gpu_interval_list.push_back(ij) ;
 //cout << " Enter here " << gpu_interval_list.size() << endl ;
 gpu_interval_priority.push_back(MainQue_priority[i*dimension+j]) ;
 }
 //gpu_interval_priority.push_back(MainQue_priority[i]) ;
 }
 // cout << "Starting New GPU call of size " << gpu_interval_priority.size() << endl ;
 // for(int i=0; i<(int)gpu_interval_priority.size(); i++) {
 // interval_gpu<double> temp = gpu_interval_list[i] ;
 // cout << temp.lower() << " : " << temp.upper() << " -- " << gpu_interval_priority[i] << endl ;
 // }
 //--- Clear the intervals that has been provided to the gpu ---
 MainQue.erase(MainQue.begin() + (CPU_THRESHOLD)*dimension , MainQue.end()) ;
 MainQue_priority.erase(MainQue_priority.begin() + (CPU_THRESHOLD)*dimension, MainQue_priority.end()) ;
 /* KernelArray<interval_gpu<float>>*/ gpuMainQue = convertToKernel(gpu_interval_list);
 /* KernelArray<float>*/ gpuPriQue = convertToKernel(gpu_interval_priority);
 cout << " GPU_QUEUE_SIZE SENT = " << gpuPriQue._size << endl ;
 ManageThreads.push_back(thread(gpuHandlerThread, gpuMainQue, gpuPriQue, dimension)) ; // Trigger the gpu thread
 }
 fbest = returnMax(fbest, fbestTag.front());
 X.clear();
 X1.clear();
 X2.clear();
 Xi.clear();
 //printf("MainQueueSize = %lu, TempQueueSize = %lu\n", MainQue_priority.size(), TempQue.size());
 // Pop the next candidate box (dimension consecutive intervals).
 for(int i=0; i<dimension; i++) {
 X.push_back(MainQue.front());
 MainQue.pop_front();
 MainQue_priority.pop_front();
 }
 // MainQue_priority.pop_front();
 count++ ;
 //From script
 FunctionBound = cpu_Inclusion_Func_expr( X[0], X[1] ) ; // possibly from script
 //End script
 // Prune when the box cannot beat fbest or is already small enough;
 // otherwise bisect along the widest dimension.
 if ( FunctionBound.right() < fbest || X[IntervalWidth(X)].width() <= intervalEpsilon || FunctionBound.width() <= outputEpsilon ) {
 //printf("GetNextElement\n");
 //cout << "Current-Size = " << (int)MainQue_priority.size()/dimension << endl ;
 }
 else {
 for(int i=0; i<dimension; i++) {
 if(i == IntervalWidth(X)) {
 gaol::interval a(X[i].left(), X[i].left() + X[i].width()/2 ) ;
 gaol::interval b(X[i].left() + X[i].width()/2, X[i].right() ) ;
 X1.push_back(a);
 X2.push_back(b);
 }
 else { X1.push_back(X[i]); X2.push_back(X[i]) ; }
 }
 Xi.push_back(X1);
 Xi.push_back(X2) ;
 for(int i=0; i< 2; i++) {
 //From script
 float ei = cpu_Func_expr( Xi[i][0].width()/2 + Xi[i][0].left() , Xi[i][1].width()/2 + Xi[i][1].left() );
 //End script
 if(ei > fbest) { fbest = ei ; bestbbInt = Xi[i] ; }
 //printf("fbest = %f , Ei = %f\n", fbest, ei);
 //cout << "Current Best Interval = " ;
 //for(int k=0; k<dimension; k++) cout << " " << Xi[i][k] ;
 //cout << endl ;
 for (int j=0; j< dimension; j++) { TempQue.push_back(Xi[i][j]) ; TempQue_priority.push_back(ei) ; }
 // TempQue_priority.push_back(ei) ;
 }
 }
 }
 printf("fbest = %f \n", fbest);
 cout << "Current Best Interval = " ;
 for(int k=0; k<dimension; k++) cout << " " << bestbbInt[k] ;
 cout << endl ;
}
b6baf8f275e2f67c3e8b3c0887365a094ad5b300.cu
#include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <gaol/gaol.h> #include "helper_cuda.h" #include "cuda_interval_lib.h" #include <limits> #include <vector> #include <queue> #include <deque> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include "omp.h" #include <thread> #define NUM_THREADS 4 #define intervalEpsilon 0.001 #define outputEpsilon 0.001 #define K 10 // Sampling number #define DIM 2 // Dimension #define NEW_INTV_THRESHOLD 8 #define CPU_THRESHOLD 12 #define USE_GPU 1 using namespace std ; int syncFlag=0 ; __host__ __device__ float returnMax( float a, float b) { if(a > b) return a ; else return b ; } __host__ __device__ float returnMin( float a, float b) { if(a < b) return a ; else return b ; } //--- Returns the dimension which has the largest width int IntervalWidth( vector<gaol::interval> X ) { if(X.size()==0) { printf("Error: Interval list empty!!!" ) ; exit(-1) ; } float Width = X[0].width(); int index = 0 ; for(int i=0; i<X.size(); i++) { if(X[i].width() > Width) { Width = X[i].width() ; index = i ; } } return index ; } template <typename T> struct KernelArray { T* _array ; int _size ; } ; template <typename T> KernelArray<T> convertToKernel(thrust::device_vector<T>& dvec) { KernelArray<T> kArray ; kArray._array = thrust::raw_pointer_cast(&dvec[0]); kArray._size = (int) dvec.size(); return kArray ; } ; //From script gaol::interval cpu_Inclusion_Func_expr ( gaol::interval x, gaol::interval y) { gaol::interval z ; z = (1 - pow(x,2))*cos(5*y) ; return z ; } //End script //From script float cpu_Func_expr ( float x, float y) { float z = (1 - pow(x,2))*cos(5*y) ; return z ; } //End script //From script __device__ float gpu_Func_expr ( float x, float y ) { float z ; z = (1 - pow(x,2))*cos(5*y) ; return z ; } //Endscript //From script __device__ float gpu_func_array_expr ( float* var ) { float z ; z = (1 - pow(var[0],2))*cos(5*var[1]) ; return z ; } //End script __global__ void 
gpuKernel ( KernelArray<interval_gpu<double>> gpuMainQue, KernelArray<double> gpuPriQue, int dimension ) { //printf("Testing ................\n"); __shared__ interval_gpu<double> SharedIntervalList[DIM] ; // Warning thrown due to blank constructor __shared__ double RandSampleList[DIM] ; __shared__ double SharedIntDimSample[DIM][K]; float localSampleList[DIM] ; int tix = threadIdx.x ; int tiy = threadIdx.y ; float chunkSize = 0.0 ; // __syncthreads(); if(tiy==0) { SharedIntervalList[tix] = gpuMainQue._array[blockIdx.x*DIM + tix] ; // printf("Copied Interval = %f from thread= %d\n", SharedIntervalList[tix].lower(), tix); RandSampleList[tix] = (SharedIntervalList[tix].lower() + SharedIntervalList[tix].upper())/2 ; } __syncthreads(); chunkSize = (SharedIntervalList[tix].upper() - SharedIntervalList[tix].lower())/K ; SharedIntDimSample[tix][tiy] = SharedIntervalList[tix].lower() +tiy*chunkSize + chunkSize/2 ; //-- Midpoint __syncthreads() ; for(int m=0; m<DIM; m++) localSampleList[m] = RandSampleList[m] ; // localSampleList = RandSampleList; localSampleList[tix] = SharedIntDimSample[tix][tiy] ; // update the random sample for that thread SharedIntDimSample[tix][tiy] = gpu_func_array_expr(localSampleList) ; __syncthreads(); //----- Max reduce for the values per tix and load in RandSampleList(reuse the allocated memory- size of dimension) //----- Do a Max reduce --- int size = K ; for(int i=ceil((float)K/2) ; i>1; i = ceil((float)i/2)) { if(tiy < i && tiy+i<size-1) SharedIntDimSample[tix][tiy] = returnMax(SharedIntDimSample[tix][tiy] , SharedIntDimSample[tix][tiy+i]) ; size = i ; // SharedIntDimSample[tix][tiy] += SharedIntDimSample[tix][tiy + i] ; __syncthreads() ; } if(K > 1 && tiy==0) SharedIntDimSample[tix][0] = returnMax(SharedIntDimSample[tix][0], SharedIntDimSample[tix][1]) ; __syncthreads() ; //----- Max vaue across the dimensions ---- size = 0; for(int i=ceil((float)DIM/2) ; i > 1; i = ceil((float)i/2)) { if(tix < i && tix+i<size-1) SharedIntDimSample[tix][0] = 
returnMax(SharedIntDimSample[tix][0], SharedIntDimSample[tix+i][0]) ; size = i ; __syncthreads() ; } //if(tix==0 && tiy==0) { // if(DIM > 1) SharedIntDimSample[0][0] = returnMax(SharedIntDimSample[0][0], // SharedIntDimSample[1][0]) ; //--copy this priority value to the global memory //} if(tiy==0) if(DIM > 1)gpuPriQue._array[blockIdx.x*DIM + tix] = returnMax(SharedIntDimSample[0][0], SharedIntDimSample[1][0]); else gpuPriQue._array[blockIdx.x*DIM + tix] = SharedIntDimSample[0][0] ; // gpuPriQue._array[blockIdx.x*DIM + tix] = SharedIntDimSample[0][0] ; //----- The RandSampleList becomes the priority label array of SharedIntervalList ----- //----- Sort these and terminate //---- Sorting done with a thrust call at the host(execution will still be on device) __syncthreads(); } //ManageThreads.push_back(thread(gpuHandlerThread, gpuMainQue, gpuPriQue, dimension)) ; // Trigger the gpu thread void gpuHandlerThread ( KernelArray<interval_gpu<double>> gpuMainQue, KernelArray<double> gpuPriQue, int dimension) { // printf("Debug: 7: Got called...\n"); if(K*dimension > 512) { cout << "Reduce the K value" << endl ; } else { //dim3 dimBlock(K, dimension); dim3 dimBlock(dimension, K); dim3 dimGrid(gpuPriQue._size); gpuKernel<<<dimGrid,dimBlock>>>(gpuMainQue, gpuPriQue, dimension) ; cudaDeviceSynchronize(); //thrust::device_vector<float> a_temp = *gpuPriQue._array ; // thrust::sort(gpuPriQue._array[0], gpuPriQue._array.end[4]) ; } syncFlag = 1 ; } //From script int dimension = 2 ; // From scipt - defined by the problem gaol::interval x_0(0,5), x_1(-7,5) ; // From script - defined by the problem //End script int main() { gaol::init(); omp_set_num_threads(NUM_THREADS); //--- Data structure for the gpu --- thrust::device_vector<interval_gpu<double>> gpu_interval_list ; thrust::device_vector<double> gpu_interval_priority ; thrust::device_vector<float> fbestTag ; KernelArray<interval_gpu<double>> gpuMainQue ; KernelArray<double> gpuPriQue ; //--- Data structure for the cpu --- float 
fbest = numeric_limits<float>::min(); vector<gaol::interval> bestbbInt(dimension) ; deque<gaol::interval> MainQue ; deque<gaol::interval> TempQue ; deque<float> MainQue_priority ; deque<float> TempQue_priority ; deque<thread> ManageThreads ; vector<gaol::interval> MidPoints(dimension) ; int addedIntervalSize = 0; // Holds information for threshold of gpu call int count=0; int gpuSize=0; //From script TempQue.push_back(x_0) ; // From script - initalise queue with starting intervals TempQue.push_back(x_1) ; //End script vector<gaol::interval> X(dimension); // Intervals to be used inside the while loop vector<gaol::interval> X1(dimension); // Intervals to be used inside the while loop vector<gaol::interval> X2(dimension); // Intervals to be used inside the while loop vector<vector<gaol::interval>> Xi ; gaol::interval FunctionBound ; //---- Initialise fbestTag -------- fbestTag.push_back(numeric_limits<float>::min()) ; //---- Get the priority of the starting interval ---- //From script gaol::interval PriTemp = cpu_Inclusion_Func_expr( (x_0.left() + x_0.right())/2 , (x_1.left() + x_1.right())/2 ) ; //End script for(int i=0; i<dimension; i++) TempQue_priority.push_back(PriTemp.left()) ; int loop_Counter = 0 ; //-- ManageThreads commented until gpu comes alive while( (int)TempQue_priority.size()> 0 || (int)MainQue_priority.size()>0 || (int)ManageThreads.size()>0) { // printf("Start: MainQue_priority = %d \n", (int)MainQue_priority.size()); // printf("Start: TempQue_priority = %d \n", (int)TempQue_priority.size()); // printf("Start: ManageThreads = %d \n", (int)ManageThreads.size()); // printf("Debug: 6: Reached here: Temp_Queue_Size = %lu, Main_Queue_size = %lu\n", TempQue.size(), MainQue.size()); // cout << " Idiot!!... Terminate while :)...." 
<< endl ; if(USE_GPU==1 && syncFlag == 1 && ManageThreads.size() != 0 && count>=CPU_THRESHOLD) { // gpu calls to be joined if(ManageThreads.front().joinable()) ManageThreads.front().join() ; loop_Counter++ ; // if(loop_Counter==1) { // cout << "After the re-written priority from gpu " << endl ; // for(int i=0; i<(int)gpu_interval_priority.size(); i++) { // interval_gpu<double> temp = gpu_interval_list[i] ; // cout << temp.lower() << " : " << temp.upper() << " -- " << gpu_interval_priority[i] << endl ; // } //cout << "GPU_QUEUE_SIZE = " << (int)gpu_interval_priority.size() << endl ; cout << "GPU_QUEUE_SIZE RECEIVED= " << gpuPriQue._size << endl ; // } thrust::stable_sort_by_key(gpu_interval_priority.begin(), gpu_interval_priority.end(), gpu_interval_list.begin()) ; // cout << "After Sorting " << endl ; // for(int i=0; i<(int)gpu_interval_priority.size(); i++) { // interval_gpu<double> temp = gpu_interval_list[i] ; // cout << temp.lower() << " : " << temp.upper() << " -- " << gpu_interval_priority[i] << endl ; // } syncFlag = 0 ; count = 0 ; ManageThreads.pop_front() ; //MainQue.clear() ; for(int i=0; i < (int)gpu_interval_list.size()/dimension; i++) { // translate gpu return list to gaol for(int j=dimension-1; j>=0; j--) { interval_gpu<double> ij_gpu = gpu_interval_list[i*dimension + j] ; gaol::interval ij(ij_gpu.lower(), ij_gpu.upper()); MainQue.push_front(ij) ; MainQue_priority.push_front(gpu_interval_priority[i*dimension + j]) ; } //MainQue_priority.push_front(gpu_interval_priority[i]) ; } gpu_interval_priority.clear(); gpu_interval_list.clear(); } if((int)TempQue.size() != 0) { // cout << " TempQue-Size = " << TempQue_priority.size() << endl ; for(int i=0; i<(int)TempQue.size()/dimension; i++) { // push the TempQue to the MainQue for(int j=0; j < dimension; j++) { MainQue.push_back(TempQue[i*dimension+j]) ; MainQue_priority.push_back(TempQue_priority[i*dimension+j]) ; } //MainQue_priority.push_back(TempQue_priority[i]) ; //printf("Update-2: MainQue_priority = %d 
\n", (int)MainQue_priority.size()); } TempQue_priority.clear(); TempQue.clear(); } if(USE_GPU==1 && (int)MainQue_priority.size()/dimension - CPU_THRESHOLD > NEW_INTV_THRESHOLD && ManageThreads.size()==0 ) { // minimum number of new intervals to trigger gpu // gpu_interval_list.clear(); // gpu_interval_priority.clear(); cout << "MainQueSize = " << (int)MainQue.size() << endl ; for(int i=CPU_THRESHOLD; i<(int)MainQue.size()/dimension; i++) { for(int j=0; j<dimension; j++) { interval_gpu<double> ij(MainQue[i*dimension+j].left(), MainQue[i*dimension+j].right()) ; gpu_interval_list.push_back(ij) ; //cout << " Enter here " << gpu_interval_list.size() << endl ; gpu_interval_priority.push_back(MainQue_priority[i*dimension+j]) ; } //gpu_interval_priority.push_back(MainQue_priority[i]) ; } // cout << "Starting New GPU call of size " << gpu_interval_priority.size() << endl ; // for(int i=0; i<(int)gpu_interval_priority.size(); i++) { // interval_gpu<double> temp = gpu_interval_list[i] ; // cout << temp.lower() << " : " << temp.upper() << " -- " << gpu_interval_priority[i] << endl ; // } //--- Clear the intervals that has been provided to the gpu --- MainQue.erase(MainQue.begin() + (CPU_THRESHOLD)*dimension , MainQue.end()) ; MainQue_priority.erase(MainQue_priority.begin() + (CPU_THRESHOLD)*dimension, MainQue_priority.end()) ; /* KernelArray<interval_gpu<float>>*/ gpuMainQue = convertToKernel(gpu_interval_list); /* KernelArray<float>*/ gpuPriQue = convertToKernel(gpu_interval_priority); cout << " GPU_QUEUE_SIZE SENT = " << gpuPriQue._size << endl ; ManageThreads.push_back(thread(gpuHandlerThread, gpuMainQue, gpuPriQue, dimension)) ; // Trigger the gpu thread } fbest = returnMax(fbest, fbestTag.front()); X.clear(); X1.clear(); X2.clear(); Xi.clear(); //printf("MainQueueSize = %lu, TempQueueSize = %lu\n", MainQue_priority.size(), TempQue.size()); for(int i=0; i<dimension; i++) { X.push_back(MainQue.front()); MainQue.pop_front(); MainQue_priority.pop_front(); } // 
MainQue_priority.pop_front(); count++ ; //From script FunctionBound = cpu_Inclusion_Func_expr( X[0], X[1] ) ; // possibly from script //End script if ( FunctionBound.right() < fbest || X[IntervalWidth(X)].width() <= intervalEpsilon || FunctionBound.width() <= outputEpsilon ) { //printf("GetNextElement\n"); //cout << "Current-Size = " << (int)MainQue_priority.size()/dimension << endl ; } else { for(int i=0; i<dimension; i++) { if(i == IntervalWidth(X)) { gaol::interval a(X[i].left(), X[i].left() + X[i].width()/2 ) ; gaol::interval b(X[i].left() + X[i].width()/2, X[i].right() ) ; X1.push_back(a); X2.push_back(b); } else { X1.push_back(X[i]); X2.push_back(X[i]) ; } } Xi.push_back(X1); Xi.push_back(X2) ; for(int i=0; i< 2; i++) { //From script float ei = cpu_Func_expr( Xi[i][0].width()/2 + Xi[i][0].left() , Xi[i][1].width()/2 + Xi[i][1].left() ); //End script if(ei > fbest) { fbest = ei ; bestbbInt = Xi[i] ; } //printf("fbest = %f , Ei = %f\n", fbest, ei); //cout << "Current Best Interval = " ; //for(int k=0; k<dimension; k++) cout << " " << Xi[i][k] ; //cout << endl ; for (int j=0; j< dimension; j++) { TempQue.push_back(Xi[i][j]) ; TempQue_priority.push_back(ei) ; } // TempQue_priority.push_back(ei) ; } } } printf("fbest = %f \n", fbest); cout << "Current Best Interval = " ; for(int k=0; k<dimension; k++) cout << " " << bestbbInt[k] ; cout << endl ; }
e77bc50902228d52d3591cefcee7df7a56fa7e6d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "callOperation.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; hipMalloc(&a, XSIZE*YSIZE); int *b = NULL; hipMalloc(&b, XSIZE*YSIZE); int *res = NULL; hipMalloc(&res, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( callOperation), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,res,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( callOperation), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,res,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( callOperation), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,res,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' 
<< ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e77bc50902228d52d3591cefcee7df7a56fa7e6d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "callOperation.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); int *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); int *res = NULL; cudaMalloc(&res, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); callOperation<<<gridBlock,threadBlock>>>(a,b,res,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { callOperation<<<gridBlock,threadBlock>>>(a,b,res,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { callOperation<<<gridBlock,threadBlock>>>(a,b,res,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4c8770d794f3f5dd86bd8f88bc46483ad08d438d.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "common.h" struct ThresholdUpdateOutput { const float threshold_; const float val_; ThresholdUpdateOutput(float threshold, float val) : threshold_(threshold) , val_(val) {} __device__ __forceinline__ void operator()(float *out, float *in) { float x = *in; *out = (x > threshold_) ? x : val_; } }; // in-place variant struct ThresholdUpdateOutputIP { const float threshold_; const float val_; ThresholdUpdateOutputIP(float threshold, float val) : threshold_(threshold) , val_(val) {} __device__ __forceinline__ void operator()(float *x) { *x = (*x > threshold_) ? *x : val_; } }; void THNN_CudaThreshold_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, double threshold, double val, bool inplace) { THCUNN_assertSameGPU(state, 2, input, output); if (inplace) { THCudaTensor_pointwiseApply1(state, input, ThresholdUpdateOutputIP(threshold, val) ); THCudaTensor_set(state, output, input); } else { THCudaTensor_resizeAs(state, output, input); THCudaTensor_pointwiseApply2(state, output, input, ThresholdUpdateOutput(threshold, val) ); } THCudaCheck(hipGetLastError()); } struct ThresholdUpdateGradInput { const float threshold_; ThresholdUpdateGradInput(float threshold) : threshold_(threshold) {} __device__ __forceinline__ void operator()( float *gradInput, float *input, float *gradOutput) const { *gradInput = (*input > threshold_) ? *gradOutput : 0; } }; struct ThresholdUpdateGradInputIP { const float threshold_; ThresholdUpdateGradInputIP(float threshold) : threshold_(threshold) {} __device__ __forceinline__ void operator()( float *gradOutput, float *input) const { *gradOutput = (*input > threshold_) ? 
*gradOutput : 0; } }; void THNN_CudaThreshold_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, double threshold, bool inplace) { THCUNN_assertSameGPU(state, 3, input, gradInput, gradOutput); if (inplace) { THCudaTensor_pointwiseApply2(state, gradOutput, input, ThresholdUpdateGradInputIP(threshold) ); THCudaTensor_set(state, gradInput, gradOutput); } else { THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput, ThresholdUpdateGradInput(threshold) ); } THCudaCheck(hipGetLastError()); }
4c8770d794f3f5dd86bd8f88bc46483ad08d438d.cu
#include "THCUNN.h" #include "common.h" struct ThresholdUpdateOutput { const float threshold_; const float val_; ThresholdUpdateOutput(float threshold, float val) : threshold_(threshold) , val_(val) {} __device__ __forceinline__ void operator()(float *out, float *in) { float x = *in; *out = (x > threshold_) ? x : val_; } }; // in-place variant struct ThresholdUpdateOutputIP { const float threshold_; const float val_; ThresholdUpdateOutputIP(float threshold, float val) : threshold_(threshold) , val_(val) {} __device__ __forceinline__ void operator()(float *x) { *x = (*x > threshold_) ? *x : val_; } }; void THNN_CudaThreshold_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, double threshold, double val, bool inplace) { THCUNN_assertSameGPU(state, 2, input, output); if (inplace) { THCudaTensor_pointwiseApply1(state, input, ThresholdUpdateOutputIP(threshold, val) ); THCudaTensor_set(state, output, input); } else { THCudaTensor_resizeAs(state, output, input); THCudaTensor_pointwiseApply2(state, output, input, ThresholdUpdateOutput(threshold, val) ); } THCudaCheck(cudaGetLastError()); } struct ThresholdUpdateGradInput { const float threshold_; ThresholdUpdateGradInput(float threshold) : threshold_(threshold) {} __device__ __forceinline__ void operator()( float *gradInput, float *input, float *gradOutput) const { *gradInput = (*input > threshold_) ? *gradOutput : 0; } }; struct ThresholdUpdateGradInputIP { const float threshold_; ThresholdUpdateGradInputIP(float threshold) : threshold_(threshold) {} __device__ __forceinline__ void operator()( float *gradOutput, float *input) const { *gradOutput = (*input > threshold_) ? 
*gradOutput : 0; } }; void THNN_CudaThreshold_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, double threshold, bool inplace) { THCUNN_assertSameGPU(state, 3, input, gradInput, gradOutput); if (inplace) { THCudaTensor_pointwiseApply2(state, gradOutput, input, ThresholdUpdateGradInputIP(threshold) ); THCudaTensor_set(state, gradInput, gradOutput); } else { THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput, ThresholdUpdateGradInput(threshold) ); } THCudaCheck(cudaGetLastError()); }
e0675de4160b29b9915f9484065dd7f11e4dd0d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S2_5.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); } 
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.6743585456438,0.00126116515238777,0.782285143101146,0.781885737321280,0.000172267497323657,0.486193660951379,0.00291820808108493,0.999998382455018,1.89973078307127e-08,1.86451321167615e-05,0.999780198191440,1.00782702931804,0.999999754763967,2.76599036686923e-05,0.357538249293263,10.7085717792583,139.021384569998}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) 
{ if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real 
pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters real parameters []={14.4941061664816,0.000306940351318330,0.000126486160649835,0.000251593758331556,0.231852653636147,0.170492615868249,0.109036079095606,4.44796487754522,0.0111149661882113,1.23956736157302,1099.91017026794,0.000314927815763443,0.381236416535235,0.0193513922111542,0.00539385037460332,9.81890868796030e-06}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; 
real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update 
concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = 
Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
e0675de4160b29b9915f9484065dd7f11e4dd0d7.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S2_5.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; 
if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.6743585456438,0.00126116515238777,0.782285143101146,0.781885737321280,0.000172267497323657,0.486193660951379,0.00291820808108493,0.999998382455018,1.89973078307127e-08,1.86451321167615e-05,0.999780198191440,1.00782702931804,0.999999754763967,2.76599036686923e-05,0.357538249293263,10.7085717792583,139.021384569998}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, 
rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif 
//Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters real parameters []={14.4941061664816,0.000306940351318330,0.000126486160649835,0.000251593758331556,0.231852653636147,0.170492615868249,0.109036079095606,4.44796487754522,0.0111149661882113,1.23956736157302,1099.91017026794,0.000314927815763443,0.381236416535235,0.0193513922111542,0.00539385037460332,9.81890868796030e-06}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; 
real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// 
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = 
Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
9eaf746f66293471eccaa0f23b8f3726094cc38c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void __kernel_dense2blk(const half *__restrict__ dense, half *__restrict__ blk, const int ncol) { const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto warp_id = tid / 32; const auto blk_col_num = ncol / 16; const auto row_blk = warp_id / blk_col_num; const auto col_blk = warp_id % blk_col_num; cmat_t frag; const auto src = get_blk_start(dense, row_blk, col_blk, ncol); wmma::load_matrix_sync(frag, src, ncol, wmma::mem_row_major); const auto dst = &blk[(blk_col_num * row_blk + col_blk) * 256]; wmma::store_matrix_sync(dst, frag, 16, wmma::mem_row_major); }
9eaf746f66293471eccaa0f23b8f3726094cc38c.cu
__global__ void __kernel_dense2blk(const half *__restrict__ dense, half *__restrict__ blk, const int ncol) { const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto warp_id = tid / 32; const auto blk_col_num = ncol / 16; const auto row_blk = warp_id / blk_col_num; const auto col_blk = warp_id % blk_col_num; cmat_t frag; const auto src = get_blk_start(dense, row_blk, col_blk, ncol); wmma::load_matrix_sync(frag, src, ncol, wmma::mem_row_major); const auto dst = &blk[(blk_col_num * row_blk + col_blk) * 256]; wmma::store_matrix_sync(dst, frag, 16, wmma::mem_row_major); }
28fc5ea18332983ab02838fcfe93fea63ca3ec7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/layers/activation/pooling_layer.hpp" namespace caffe { template <typename Dtype> static __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) for (int w = wstart; w < wend; ++w) if (bottom_data[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_data[maxidx]; } top_data[index] = maxval; mask[index] = maxidx; } } template <typename Dtype> static __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + 
pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) for (int w = wstart; w < wend; ++w) aveval += bottom_data[h * width + w]; top_data[index] = aveval / pool_size; } } template <typename Dtype> static __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, const int* mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; int phend = min((h + pad_h) / stride_h + 1, pooled_height); int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; mask += offset; for (int ph = phstart; ph < phend; ++ph) for (int pw = pwstart; pw < pwend; ++pw) if (mask[ph * pooled_width + pw] == h * width + w) gradient += top_diff[ph * pooled_width + pw]; bottom_diff[index] = gradient; } } template <typename Dtype> static __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int w = index % width + pad_w; int h = (index / width) % height + pad_h; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, pooled_height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) for (int pw = pwstart; pw < pwend; ++pw) { int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff[ph * pooled_width + pw] / pool_size; } bottom_diff[index] = gradient; } } //-------------------------------------------------------------------------------------- template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); int* mask = NULL; if (this->layer_param_.pooling_param().global_pool() == false) { if (this->layer_param_.pooling_param().pool() == "max") { mask = max_idx_.mutable_gpu_data(); hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, num, channels, height, width, pooled_height_, pooled_width_, kernel_size_, kernel_size_, stride_, stride_, pad_, pad_, top_data, mask); } else if (this->layer_param_.pooling_param().pool() == "ave") { hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, num, channels, height, width, pooled_height_, pooled_width_, kernel_size_, kernel_size_, stride_, stride_, pad_, pad_, top_data); } else LOG(FATAL)<<"unsupported"; } else { if (this->layer_param_.pooling_param().pool() == "max") { mask = max_idx_.mutable_gpu_data(); hipLaunchKernelGGL(( 
MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, num, channels, height, width, pooled_height_, pooled_width_, height, width, height, width, 0, 0, top_data, mask); } else if (this->layer_param_.pooling_param().pool() == "ave") { hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, num, channels, height, width, pooled_height_, pooled_width_, height, width, height, width, 0, 0, top_data); } else LOG(FATAL)<<"unsupported"; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* mask = NULL; if (this->layer_param_.pooling_param().global_pool() == false) { if (this->layer_param_.pooling_param().pool() == "max") { mask = max_idx_.gpu_data(); hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, num, channels, height, width, pooled_height_, pooled_width_, kernel_size_, kernel_size_, stride_, stride_, pad_, pad_, bottom_diff); } else if (this->layer_param_.pooling_param().pool() == "ave") { hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, num, channels, height, width, pooled_height_, pooled_width_, kernel_size_, kernel_size_, stride_, stride_, pad_, pad_, bottom_diff); } else LOG(FATAL)<<"unsupported"; } else { if (this->layer_param_.pooling_param().pool() == "max") { mask = max_idx_.gpu_data(); hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, num, channels, height, width, pooled_height_, pooled_width_, height, width, height, width, 0, 0, bottom_diff); } else if (this->layer_param_.pooling_param().pool() == "ave") { hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, num, channels, height, width, pooled_height_, pooled_width_, height, width, height, width, 0, 0, bottom_diff); } else LOG(FATAL)<<"unsupported"; } } template <typename Dtype> void PoolingLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); int* mask = NULL; hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->gpu_sec_diff(), num, channels, height, width, pooled_height_, pooled_width_, kernel_size_, kernel_size_, stride_, stride_, pad_, pad_, top[0]->mutable_gpu_sec_diff()); } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
28fc5ea18332983ab02838fcfe93fea63ca3ec7d.cu
#include "caffe/layers/activation/pooling_layer.hpp" namespace caffe { template <typename Dtype> static __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) for (int w = wstart; w < wend; ++w) if (bottom_data[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_data[maxidx]; } top_data[index] = maxval; mask[index] = maxidx; } } template <typename Dtype> static __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) 
* (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) for (int w = wstart; w < wend; ++w) aveval += bottom_data[h * width + w]; top_data[index] = aveval / pool_size; } } template <typename Dtype> static __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, const int* mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; int phend = min((h + pad_h) / stride_h + 1, pooled_height); int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; mask += offset; for (int ph = phstart; ph < phend; ++ph) for (int pw = pwstart; pw < pwend; ++pw) if (mask[ph * pooled_width + pw] == h * width + w) gradient += top_diff[ph * pooled_width + pw]; bottom_diff[index] = gradient; } } template <typename Dtype> static __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int w = index % width + pad_w; int h = (index / width) % height + pad_h; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, pooled_height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) for (int pw = pwstart; pw < pwend; ++pw) { int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff[ph * pooled_width + pw] / pool_size; } bottom_diff[index] = gradient; } } //-------------------------------------------------------------------------------------- template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); int* mask = NULL; if (this->layer_param_.pooling_param().global_pool() == false) { if (this->layer_param_.pooling_param().pool() == "max") { mask = max_idx_.mutable_gpu_data(); MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> ( count, bottom_data, num, channels, height, width, pooled_height_, pooled_width_, kernel_size_, kernel_size_, stride_, stride_, pad_, pad_, top_data, mask); } else if (this->layer_param_.pooling_param().pool() == "ave") { AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> ( count, bottom_data, num, channels, height, width, pooled_height_, pooled_width_, kernel_size_, kernel_size_, stride_, stride_, pad_, pad_, top_data); } else LOG(FATAL)<<"unsupported"; } else { if (this->layer_param_.pooling_param().pool() == "max") { mask = max_idx_.mutable_gpu_data(); MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> ( count, bottom_data, num, 
channels, height, width, pooled_height_, pooled_width_, height, width, height, width, 0, 0, top_data, mask); } else if (this->layer_param_.pooling_param().pool() == "ave") { AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> ( count, bottom_data, num, channels, height, width, pooled_height_, pooled_width_, height, width, height, width, 0, 0, top_data); } else LOG(FATAL)<<"unsupported"; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* mask = NULL; if (this->layer_param_.pooling_param().global_pool() == false) { if (this->layer_param_.pooling_param().pool() == "max") { mask = max_idx_.gpu_data(); MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> ( count, top_diff, mask, num, channels, height, width, pooled_height_, pooled_width_, kernel_size_, kernel_size_, stride_, stride_, pad_, pad_, bottom_diff); } else if (this->layer_param_.pooling_param().pool() == "ave") { AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> ( count, top_diff, num, channels, height, width, pooled_height_, pooled_width_, kernel_size_, kernel_size_, stride_, stride_, pad_, pad_, bottom_diff); } else LOG(FATAL)<<"unsupported"; } else { if (this->layer_param_.pooling_param().pool() == "max") { mask = max_idx_.gpu_data(); MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> ( count, top_diff, mask, num, channels, height, width, pooled_height_, pooled_width_, height, width, height, width, 0, 0, bottom_diff); } else if (this->layer_param_.pooling_param().pool() == "ave") { 
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> ( count, top_diff, num, channels, height, width, pooled_height_, pooled_width_, height, width, height, width, 0, 0, bottom_diff); } else LOG(FATAL)<<"unsupported"; } } template <typename Dtype> void PoolingLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); int* mask = NULL; AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> ( count, bottom[0]->gpu_sec_diff(), num, channels, height, width, pooled_height_, pooled_width_, kernel_size_, kernel_size_, stride_, stride_, pad_, pad_, top[0]->mutable_gpu_sec_diff()); } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
1e1e02e6b7c7ce20ea6f4c6a2e367d6518d7b261.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // -*- compile-command: "nvcc arch sm_50 -Xptxas=-v -cubin kernel.cu"; -*- // // // #ifdef __cplusplus extern "C" { #endif #include "assert_cuda.h" #ifdef __cplusplus } #endif // // // #define PXL_KERNEL_THREADS_PER_BLOCK 256 // enough for 4Kx2 monitor // // // surface<void,cudaSurfaceType2D> surf; // // // union pxl_rgbx_24 { uint1 b32; struct { unsigned r : 8; unsigned g : 8; unsigned b : 8; unsigned na : 8; }; }; // // // extern "C" __global__ void pxl_kernel(const int width, const int height) { // pixel coordinates const int idx = (blockDim.x * blockIdx.x) + threadIdx.x; const int x = idx % width; const int y = idx / width; #if 1 // pixel color const int t = (unsigned int)clock() / 1100000; // 1.1 GHz const int xt = (idx + t) % width; const unsigned int ramp = (unsigned int)(((float)xt / (float)(width-1)) * 255.0f + 0.5f); const unsigned int bar = ((y + t) / 32) & 3; union pxl_rgbx_24 rgbx; rgbx.r = (bar == 0) || (bar == 1) ? ramp : 0; rgbx.g = (bar == 0) || (bar == 2) ? ramp : 0; rgbx.b = (bar == 0) || (bar == 3) ? ramp : 0; rgbx.na = 255; #else // DRAW A RED BORDER TO VALIDATE FLIPPED BLIT const bool border = (x == 0) || (x == width-1) || (y == 0) || (y == height-1); union pxl_rgbx_24 rgbx = { border ? 
0xFF0000FF : 0xFF000000 }; #endif surf2Dwrite(rgbx.b32, // even simpler: (unsigned int)clock() surf, x*sizeof(rgbx), y, hipBoundaryModeZero); // squelches out-of-bound writes } // // // extern "C" hipError_t pxl_kernel_launcher(hipArray_const_t array, const int width, const int height, hipEvent_t event, hipStream_t stream) { hipError_t cuda_err; // cuda_err = hipEventRecord(event,stream); cuda_err = cuda(BindSurfaceToArray(surf,array)); if (cuda_err) return cuda_err; const int blocks = (width * height + PXL_KERNEL_THREADS_PER_BLOCK - 1) / PXL_KERNEL_THREADS_PER_BLOCK; // cuda_err = hipEventRecord(event,stream); if (blocks > 0) hipLaunchKernelGGL(( pxl_kernel), dim3(blocks),dim3(PXL_KERNEL_THREADS_PER_BLOCK),0,stream, width,height); // cuda_err = hipStreamWaitEvent(stream,event,0); return hipSuccess; } // // //
1e1e02e6b7c7ce20ea6f4c6a2e367d6518d7b261.cu
// -*- compile-command: "nvcc arch sm_50 -Xptxas=-v -cubin kernel.cu"; -*- // // // #ifdef __cplusplus extern "C" { #endif #include "assert_cuda.h" #ifdef __cplusplus } #endif // // // #define PXL_KERNEL_THREADS_PER_BLOCK 256 // enough for 4Kx2 monitor // // // surface<void,cudaSurfaceType2D> surf; // // // union pxl_rgbx_24 { uint1 b32; struct { unsigned r : 8; unsigned g : 8; unsigned b : 8; unsigned na : 8; }; }; // // // extern "C" __global__ void pxl_kernel(const int width, const int height) { // pixel coordinates const int idx = (blockDim.x * blockIdx.x) + threadIdx.x; const int x = idx % width; const int y = idx / width; #if 1 // pixel color const int t = (unsigned int)clock() / 1100000; // 1.1 GHz const int xt = (idx + t) % width; const unsigned int ramp = (unsigned int)(((float)xt / (float)(width-1)) * 255.0f + 0.5f); const unsigned int bar = ((y + t) / 32) & 3; union pxl_rgbx_24 rgbx; rgbx.r = (bar == 0) || (bar == 1) ? ramp : 0; rgbx.g = (bar == 0) || (bar == 2) ? ramp : 0; rgbx.b = (bar == 0) || (bar == 3) ? ramp : 0; rgbx.na = 255; #else // DRAW A RED BORDER TO VALIDATE FLIPPED BLIT const bool border = (x == 0) || (x == width-1) || (y == 0) || (y == height-1); union pxl_rgbx_24 rgbx = { border ? 
0xFF0000FF : 0xFF000000 }; #endif surf2Dwrite(rgbx.b32, // even simpler: (unsigned int)clock() surf, x*sizeof(rgbx), y, cudaBoundaryModeZero); // squelches out-of-bound writes } // // // extern "C" cudaError_t pxl_kernel_launcher(cudaArray_const_t array, const int width, const int height, cudaEvent_t event, cudaStream_t stream) { cudaError_t cuda_err; // cuda_err = cudaEventRecord(event,stream); cuda_err = cuda(BindSurfaceToArray(surf,array)); if (cuda_err) return cuda_err; const int blocks = (width * height + PXL_KERNEL_THREADS_PER_BLOCK - 1) / PXL_KERNEL_THREADS_PER_BLOCK; // cuda_err = cudaEventRecord(event,stream); if (blocks > 0) pxl_kernel<<<blocks,PXL_KERNEL_THREADS_PER_BLOCK,0,stream>>>(width,height); // cuda_err = cudaStreamWaitEvent(stream,event,0); return cudaSuccess; } // // //
c9446561ecd1435cb7bad6d882081471683f82ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <cpu_bitmap.h> #define INF 2e10f #define rnd(x)(x*rand()/RAND_MAX) #define SPHERES 20 #define DIM 1024 struct Sphere { float r, b, g; float radius; float x, y, z; __device__ float hit(float ox, float oy, float *n) { float dx = ox - x; float dy = oy - y; if (dx*dx + dy*dy < radius*radius) { float dz = sqrtf(radius*radius - dx*dx - dy*dy); *n = dz / sqrtf(radius*radius); return dz + z; } return -INF; } }; __constant__ Sphere s[SPHERES]; /*struct DataBlock { unsigned char *dev_bitmap; Sphere *s; }; */ __global__ void kernel(unsigned char *ptr) { int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; int offset = x + y*blockDim.x*gridDim.x; float ox = (x - DIM / 2); float oy = (y - DIM / 2); float r = 0, g = 0, b = 0; float maxz = -INF; for (int i = 0; i < SPHERES; i++) { float n; float t = s[i].hit(ox, oy, &n); if (t > maxz) { float fscale = n; r = s[i].r*fscale; g = s[i].g*fscale; b = s[i].b*fscale; } } ptr[offset * 4 + 0] = (int)(r * 255); ptr[offset * 4 + 1] = (int)(g * 255); ptr[offset * 4 + 2] = (int)(b * 255); ptr[offset * 4 + 3] = 255; } int main(void) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //DataBlock data; CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; hipMalloc((void**)&dev_bitmap, bitmap.image_size()); //hipMalloc((void**)&s, sizeof(Sphere)*SPHERES); Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere)*SPHERES); for (int i = 0; i < SPHERES; i++) { temp_s[i].r = rnd(1.0f); temp_s[i].g = rnd(1.0f); temp_s[i].b = rnd(1.0f); temp_s[i].x = rnd(1000.0f) - 500; temp_s[i].y = rnd(1000.0f) - 500; temp_s[i].z = rnd(1000.0f) - 500; temp_s[i].radius = rnd(100.0f) + 20; } hipMemcpyToSymbol(s, temp_s, sizeof(Sphere)*SPHERES); free(temp_s); dim3 grids(DIM / 16, DIM / 16); dim3 threads(16, 16); kernel << 
<grids, threads >> > (dev_bitmap); hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost); bitmap.display_and_exit(); hipFree(dev_bitmap); hipFree(s); };
c9446561ecd1435cb7bad6d882081471683f82ab.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <cpu_bitmap.h> #define INF 2e10f #define rnd(x)(x*rand()/RAND_MAX) #define SPHERES 20 #define DIM 1024 struct Sphere { float r, b, g; float radius; float x, y, z; __device__ float hit(float ox, float oy, float *n) { float dx = ox - x; float dy = oy - y; if (dx*dx + dy*dy < radius*radius) { float dz = sqrtf(radius*radius - dx*dx - dy*dy); *n = dz / sqrtf(radius*radius); return dz + z; } return -INF; } }; __constant__ Sphere s[SPHERES]; /*struct DataBlock { unsigned char *dev_bitmap; Sphere *s; }; */ __global__ void kernel(unsigned char *ptr) { int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; int offset = x + y*blockDim.x*gridDim.x; float ox = (x - DIM / 2); float oy = (y - DIM / 2); float r = 0, g = 0, b = 0; float maxz = -INF; for (int i = 0; i < SPHERES; i++) { float n; float t = s[i].hit(ox, oy, &n); if (t > maxz) { float fscale = n; r = s[i].r*fscale; g = s[i].g*fscale; b = s[i].b*fscale; } } ptr[offset * 4 + 0] = (int)(r * 255); ptr[offset * 4 + 1] = (int)(g * 255); ptr[offset * 4 + 2] = (int)(b * 255); ptr[offset * 4 + 3] = 255; } int main(void) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //DataBlock data; CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; cudaMalloc((void**)&dev_bitmap, bitmap.image_size()); //cudaMalloc((void**)&s, sizeof(Sphere)*SPHERES); Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere)*SPHERES); for (int i = 0; i < SPHERES; i++) { temp_s[i].r = rnd(1.0f); temp_s[i].g = rnd(1.0f); temp_s[i].b = rnd(1.0f); temp_s[i].x = rnd(1000.0f) - 500; temp_s[i].y = rnd(1000.0f) - 500; temp_s[i].z = rnd(1000.0f) - 500; temp_s[i].radius = rnd(100.0f) + 20; } cudaMemcpyToSymbol(s, temp_s, sizeof(Sphere)*SPHERES); free(temp_s); dim3 grids(DIM / 16, DIM / 16); dim3 threads(16, 16); kernel << <grids, threads >> > (dev_bitmap); 
cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost); bitmap.display_and_exit(); cudaFree(dev_bitmap); cudaFree(s); };
c2bdc52050c27b002425d8006405a44521843bd6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <string.h> #include <sys/time.h> #include "Graph.h" // CUDA runtime //#include <hip/hip_runtime.h> // helper functions and utilities to work with CUDA //#include <helper_functions.h> //#include <helper_cuda.h> #define blocksize 256 using namespace std; //************************************************************************** double cpuSecond() { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6); } //************************************************************************** // FLOYD 1D BLOCKS __global__ void floyd_1d(int *M, const int nverts, const int k) { int ij = threadIdx.x + blockDim.x * blockIdx.x; if (ij < nverts * nverts) { int Mij = M[ij]; int i = ij / nverts; int j = ij - i * nverts; if (i != j && i != k && j != k) { int Mikj = M[i * nverts + k] + M[k * nverts + j]; Mij = (Mij > Mikj) ? Mikj : Mij; M[ij] = Mij; } } } //************************************************************************** // FLOYD 2D BLOCKS __global__ void floyd_kernel(int *M, const int nverts, const int k) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; int index = i * nverts + j; if (i < nverts && j < nverts) { int Mij = M[index]; if (i != j && i != k && j != k) { int Mikj = M[i * nverts + k] + M[k * nverts + j]; M[index] = (Mij > Mikj) ? Mikj : Mij; } } } //************************************************************************** // FIND MAX IN VECTOR __global__ void reduceMax(int * V_in, int * V_out, const int N) { extern __shared__ int sdata[]; int tid = threadIdx.x; int mid = blockDim.x/2; int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; sdata[tid] = ((i < N) ? V_in[i] : -1); sdata[tid + blockDim.x] = ((i + blockDim.x) < N ? 
V_in[i + blockDim.x] : -1); __syncthreads(); for(int s = mid; s > 0; s >>= 1) { if (tid < s) { if(sdata[tid] < sdata[tid + s]) { sdata[tid] = sdata[tid + s]; } } else if((i + blockDim.x + s) < N) { if(sdata[tid + mid] < sdata[tid + mid + s]) { sdata[tid + mid] = sdata[tid + mid + s]; } } __syncthreads(); } if (tid == 0) { V_out[blockIdx.x * 2] = sdata[0]; } else if (tid == mid) { V_out[(blockIdx.x * 2) + 1] = sdata[tid + mid]; } } int main(int argc, char *argv[]) { if (argc != 2) { cerr << "Sintaxis: " << argv[0] << " <archivo de grafo>" << endl; return (-1); } //Get GPU information int devID; hipDeviceProp_t props; hipError_t err; err = hipGetDevice(&devID); if (err != hipSuccess) { cout << "ERRORRR" << endl; } hipGetDeviceProperties(&props, devID); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor); Graph G; G.lee(argv[1]); // Read the Graph //cout << "EL Grafo de entrada es:"<<endl; //G.imprime(); const int nverts = G.vertices; const int niters = nverts; const int nverts2 = nverts * nverts; int *c_Out_M = new int[nverts2]; int size = nverts2 * sizeof(int); int *d_In_M = NULL; err = hipMalloc((void **)&d_In_M, size); if (err != hipSuccess) { cout << "ERROR RESERVA" << endl; } int *A = G.Get_Matrix(); // GPU phase double t1 = cpuSecond(); err = hipMemcpy(d_In_M, A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { cout << "ERROR COPIA A GPU" << endl; } for (int k = 0; k < niters; k++) { //printf("CUDA kernel launch \n"); int threadsPerDim = sqrt(blocksize); dim3 threadsPerBlock (threadsPerDim, threadsPerDim); dim3 numBlocks( ceil((float)nverts/threadsPerBlock.x), ceil((float)nverts/threadsPerBlock.y) ); hipLaunchKernelGGL(( floyd_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_In_M, nverts, k); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch kernel! 
ERROR= %d\n", err); exit(EXIT_FAILURE); } } hipMemcpy(c_Out_M, d_In_M, size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); double Tgpu = cpuSecond() - t1; cout << "Tiempo gastado GPU= " << Tgpu << endl << endl; // CPU phase t1 = cpuSecond(); // BUCLE PPAL DEL ALGORITMO int inj, in, kn; for (int k = 0; k < niters; k++) { kn = k * nverts; for (int i = 0; i < nverts; i++) { in = i * nverts; for (int j = 0; j < nverts; j++) if (i != j && i != k && j != k) { inj = in + j; A[inj] = min(A[in + k] + A[kn + j], A[inj]); } } } double t2 = cpuSecond() - t1; cout << "Tiempo gastado CPU= " << t2 << endl << endl; cout << "Ganancia= " << t2 / Tgpu << endl; for (int i = 0; i < nverts; i++) for (int j = 0; j < nverts; j++) if (abs(c_Out_M[i * nverts + j] - G.arista(i, j)) > 0) cout << "Error (" << i << "," << j << ") " << c_Out_M[i * nverts + j] << "..." << G.arista(i, j) << endl; // c_d Maximum computation on GPU dim3 threadsPerBlock(blocksize); dim3 numBlocks( ceil ((float)(nverts2 / 2)/threadsPerBlock.x)); // Maximum vector on CPU int * vmax; vmax = (int*) malloc(2*numBlocks.x*sizeof(int)); // Maximum vector to be computed on GPU int *vmax_d; hipMalloc ((void **) &vmax_d, sizeof(int)*2*numBlocks.x); int smemSize = 2*threadsPerBlock.x*sizeof(int); // Kernel launch to compute Minimum Vector hipLaunchKernelGGL(( reduceMax), dim3(numBlocks), dim3(threadsPerBlock), smemSize, 0, c_Out_M,vmax_d, nverts2); /* Copy data from device memory to host memory */ hipMemcpy(vmax, vmax_d, 2*numBlocks.x*sizeof(int),hipMemcpyDeviceToHost); // Perform final reduction in CPU int max_gpu = -1; for (int i=0; i<numBlocks.x * 2; i++) { max_gpu =max(max_gpu,vmax[i]); } cout << endl << " Camino ms largo entre los caminos mnimos = " << max_gpu << endl; }
c2bdc52050c27b002425d8006405a44521843bd6.cu
#include <iostream> #include <fstream> #include <string.h> #include <sys/time.h> #include "Graph.h" // CUDA runtime //#include <cuda_runtime.h> // helper functions and utilities to work with CUDA //#include <helper_functions.h> //#include <helper_cuda.h> #define blocksize 256 using namespace std; //************************************************************************** double cpuSecond() { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6); } //************************************************************************** // FLOYD 1D BLOCKS __global__ void floyd_1d(int *M, const int nverts, const int k) { int ij = threadIdx.x + blockDim.x * blockIdx.x; if (ij < nverts * nverts) { int Mij = M[ij]; int i = ij / nverts; int j = ij - i * nverts; if (i != j && i != k && j != k) { int Mikj = M[i * nverts + k] + M[k * nverts + j]; Mij = (Mij > Mikj) ? Mikj : Mij; M[ij] = Mij; } } } //************************************************************************** // FLOYD 2D BLOCKS __global__ void floyd_kernel(int *M, const int nverts, const int k) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; int index = i * nverts + j; if (i < nverts && j < nverts) { int Mij = M[index]; if (i != j && i != k && j != k) { int Mikj = M[i * nverts + k] + M[k * nverts + j]; M[index] = (Mij > Mikj) ? Mikj : Mij; } } } //************************************************************************** // FIND MAX IN VECTOR __global__ void reduceMax(int * V_in, int * V_out, const int N) { extern __shared__ int sdata[]; int tid = threadIdx.x; int mid = blockDim.x/2; int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; sdata[tid] = ((i < N) ? V_in[i] : -1); sdata[tid + blockDim.x] = ((i + blockDim.x) < N ? 
V_in[i + blockDim.x] : -1); __syncthreads(); for(int s = mid; s > 0; s >>= 1) { if (tid < s) { if(sdata[tid] < sdata[tid + s]) { sdata[tid] = sdata[tid + s]; } } else if((i + blockDim.x + s) < N) { if(sdata[tid + mid] < sdata[tid + mid + s]) { sdata[tid + mid] = sdata[tid + mid + s]; } } __syncthreads(); } if (tid == 0) { V_out[blockIdx.x * 2] = sdata[0]; } else if (tid == mid) { V_out[(blockIdx.x * 2) + 1] = sdata[tid + mid]; } } int main(int argc, char *argv[]) { if (argc != 2) { cerr << "Sintaxis: " << argv[0] << " <archivo de grafo>" << endl; return (-1); } //Get GPU information int devID; cudaDeviceProp props; cudaError_t err; err = cudaGetDevice(&devID); if (err != cudaSuccess) { cout << "ERRORRR" << endl; } cudaGetDeviceProperties(&props, devID); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor); Graph G; G.lee(argv[1]); // Read the Graph //cout << "EL Grafo de entrada es:"<<endl; //G.imprime(); const int nverts = G.vertices; const int niters = nverts; const int nverts2 = nverts * nverts; int *c_Out_M = new int[nverts2]; int size = nverts2 * sizeof(int); int *d_In_M = NULL; err = cudaMalloc((void **)&d_In_M, size); if (err != cudaSuccess) { cout << "ERROR RESERVA" << endl; } int *A = G.Get_Matrix(); // GPU phase double t1 = cpuSecond(); err = cudaMemcpy(d_In_M, A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { cout << "ERROR COPIA A GPU" << endl; } for (int k = 0; k < niters; k++) { //printf("CUDA kernel launch \n"); int threadsPerDim = sqrt(blocksize); dim3 threadsPerBlock (threadsPerDim, threadsPerDim); dim3 numBlocks( ceil((float)nverts/threadsPerBlock.x), ceil((float)nverts/threadsPerBlock.y) ); floyd_kernel<<<numBlocks, threadsPerBlock>>>(d_In_M, nverts, k); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch kernel! 
ERROR= %d\n", err); exit(EXIT_FAILURE); } } cudaMemcpy(c_Out_M, d_In_M, size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); double Tgpu = cpuSecond() - t1; cout << "Tiempo gastado GPU= " << Tgpu << endl << endl; // CPU phase t1 = cpuSecond(); // BUCLE PPAL DEL ALGORITMO int inj, in, kn; for (int k = 0; k < niters; k++) { kn = k * nverts; for (int i = 0; i < nverts; i++) { in = i * nverts; for (int j = 0; j < nverts; j++) if (i != j && i != k && j != k) { inj = in + j; A[inj] = min(A[in + k] + A[kn + j], A[inj]); } } } double t2 = cpuSecond() - t1; cout << "Tiempo gastado CPU= " << t2 << endl << endl; cout << "Ganancia= " << t2 / Tgpu << endl; for (int i = 0; i < nverts; i++) for (int j = 0; j < nverts; j++) if (abs(c_Out_M[i * nverts + j] - G.arista(i, j)) > 0) cout << "Error (" << i << "," << j << ") " << c_Out_M[i * nverts + j] << "..." << G.arista(i, j) << endl; // c_d Maximum computation on GPU dim3 threadsPerBlock(blocksize); dim3 numBlocks( ceil ((float)(nverts2 / 2)/threadsPerBlock.x)); // Maximum vector on CPU int * vmax; vmax = (int*) malloc(2*numBlocks.x*sizeof(int)); // Maximum vector to be computed on GPU int *vmax_d; cudaMalloc ((void **) &vmax_d, sizeof(int)*2*numBlocks.x); int smemSize = 2*threadsPerBlock.x*sizeof(int); // Kernel launch to compute Minimum Vector reduceMax<<<numBlocks, threadsPerBlock, smemSize>>>(c_Out_M,vmax_d, nverts2); /* Copy data from device memory to host memory */ cudaMemcpy(vmax, vmax_d, 2*numBlocks.x*sizeof(int),cudaMemcpyDeviceToHost); // Perform final reduction in CPU int max_gpu = -1; for (int i=0; i<numBlocks.x * 2; i++) { max_gpu =max(max_gpu,vmax[i]); } cout << endl << " Camino más largo entre los caminos mínimos = " << max_gpu << endl; }
1ed960ca1af75b61e1c13863d4378d4a080b320e.hip
// !!! This is a file automatically generated by hipify!!! /* File name: simulation.cpp Date: 2009/04/01 02:05 Author: Aaron Thompson and Lukas Vlcek Copyright (C) 2009 Aaron Thompson and Lukas Vlcek This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License in a file called COPYING along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ // Very ugly, should be reorganized #include <string> #include <iostream> #include <fstream> #include <math.h> #include <time.h> #include <stdlib.h> #include <stdio.h> #include <hip/hip_vector_types.h> #include "runGPU.h" #include "simulation.h" using namespace std; simulation *SimulationInit(char *name_sim) { FILE *fin; char *saux; simulation* sim; sim = (simulation *)malloc(sizeof(simulation)); sim->simname = (char *)malloc(LINE_LENGTH); strcpy(sim->simname, name_sim); params* pars; pars = (params *)malloc(sizeof(params)); integr* integ; integ = (integr *)malloc(sizeof(integr)); // read data from .inp fin = fopen(strcat(name_sim, ".inp") , "r"); saux = (char *) malloc(LINE_LENGTH); // ensemble integ->ensemble = (char *)malloc(3); integ->enstype = 0; fscanf (fin, "%s %s", saux, integ->ensemble); if (strcmp(integ->ensemble, "nvt") == 0) { integ->enstype = 1; fscanf (fin, "%s %f", saux, &integ->temper_0); } if (strcmp(integ->ensemble, "npt") == 0) { integ->enstype = 2; fscanf (fin, "%s %f %f", saux, &integ->temper_0, &integ->press_0); } // time and time steps fscanf (fin, "%s %d %d %d %f", saux, 
&pars->timeq, &pars->timrun, &pars->timprn, &integ->dt); fscanf (fin, "%s %f %d", saux, &pars->rcutsq, &pars->lkspace); // force & energy calcuation parameters if (pars->lkspace == 0) // 0 ... reaction field { float eps; fscanf (fin,"%s %f", saux, &eps); pars->reacf = 2.0f*(eps - 1.0f)/(2.0f*eps + 1.0f)/pow(pars->rcutsq, 3); } else if (pars->lkspace == 1) { // 1 ... Wolf fscanf (fin,"%s %f", saux, &pars->alfa); } else //2 ... Ewald { fscanf (fin,"%s %d", saux, &pars->kmax); pars->alfa = 3.09/pars->rcutsq; // one way to estimate good alpha (?) } pars->rcutsq *= pars->rcutsq; // square of cutoff distance // neighbor list skin fscanf (fin, "%s %f %d", saux, &pars->skin, &pars->nmaxlist); // PDF bin size fscanf (fin, "%s %f", saux, &pars->rdel); free(saux); fclose(fin); // read data from .fld strcpy(name_sim, sim->simname); ffield* fld; fld = (ffield *)malloc(sizeof(ffield)); fld = FFieldInput(name_sim); // read data from .cfg strcpy(name_sim, sim->simname); config* conf; conf = (config *)malloc(sizeof(config)); conf = ConfigInput(name_sim, fld->mass); sim->pars = pars; sim->integ = integ; sim->fld = fld; sim->conf = conf; return(sim); } void SimulationRun(simulation* sim) { time_t start, end; printf( "About to run simulation on GPU ...\n" ); time(&start); SimulationRun_CUDA(sim->pars, sim->integ, sim->fld, sim->conf); time(&end); printf( "Finished simulation.\n Total time: %f s.\n", difftime(end, start)); } void SimulationFinish(simulation* sim) { FFieldFinish(sim->fld); ConfigFinish(sim->conf, sim->fld, sim->simname); } // ------ Force field ------ ffield* FFieldInput(char *name) { int i, j, iaux, iatom; float faux1, faux2; char *saux; FILE *fin; ffield* fld; fld = (ffield *)malloc(sizeof(ffield)); fin = fopen(strcat(name, ".fld") , "r"); saux = (char *) malloc(LINE_LENGTH); fscanf (fin, "%s %d", saux, &fld->ntype); // number of atom types int nty = NUM_BOND_TYPES; // allocate atom type and ffield structures saux = (char *) malloc(LINE_LENGTH); fld->nt = (int 
*)malloc( nty * sizeof(int) ); fld->name = (char **)malloc( nty * sizeof(char *) ); for (int i = 0; i < fld->ntype; i++) fld->name[i] = (char *) malloc(2); fld->mass = (float *)malloc( nty * sizeof(float) ); fld->charge = (float *)malloc( nty * sizeof(float) ); fld->lj1 = (float *) malloc ( nty*nty * sizeof(float) ); fld->lj2 = (float *) malloc ( nty*nty * sizeof(float) ); for (int i = 0; i < nty*nty; i++) { fld->lj1[i] = 0.0f; fld->lj2[i] = 0.0f; } // atomic parameters: number, name, mass, charge iatom = 0; for (int it = 0; it < fld->ntype; it++) { fscanf (fin, "%d %d", &iaux, &fld->nt[it]); fscanf (fin, "%s %f %f", fld->name[it], &fld->mass[it], &fld->charge[it]); fld->mass[it] *= 10000.0; // g/mol*(A/fs)**2 -> kJ/mol*(m/s) iatom += fld->nt[it]; } // VdW int nvdw; fscanf (fin, "%s %d", saux, &nvdw); for (int it = 0; it < nvdw; it++) { fscanf (fin, "%d %d %s %f %f", &i, &j, saux, &faux1, &faux2); i--; j--; if (strcmp(saux, "LJ") == 0) // LJ form { // faux2 *= PH_kB*PH_Na/1000.0f; // K->kJ/mol if needed // for forces calculation - 12 and 6 factors included fld->lj1[i*fld->ntype + j] = 12.0f*4.0f*faux2*pow(faux1, 12); fld->lj1[j*fld->ntype + i] = fld->lj1[i*fld->ntype + j]; fld->lj2[i*fld->ntype + j] = 6.0f*4.0f*faux2*pow(faux1, 6); fld->lj2[j*fld->ntype + i] = fld->lj2[i*fld->ntype + j]; } else if (strcmp(saux, "12-6") == 0) // 12-6 form { // for forces calculation - 12 and 6 factors included fld->lj1[i*fld->ntype + j] = 12.0f*faux1; fld->lj1[j*fld->ntype + i] = fld->lj1[i*fld->ntype + j]; fld->lj2[i*fld->ntype + j] = 6.0f*faux2; fld->lj2[j*fld->ntype + i] = fld->lj2[i*fld->ntype + j]; } } // Fixed atoms // fscanf (fin, "%s %d", saux, &fld->nfixed); // for (int it = 0; it < fld->nfixed; it++) { ; } // Rigid molecules (just water) // fscanf (fin, "%s %d", saux, &fld->nrigid); // for (int it = 0; it < fld->nrigid; it++) { ; } // Bond types fscanf (fin, "%s %d", saux, &fld->tbonds); // bond types int nbo = NUM_BOND_TYPES; fld->bnd1 = (float *)malloc( nbo * 
sizeof(float) ); fld->bnd2 = (float *)malloc( nbo * sizeof(float) ); for (int i = 0; i < nbo*nbo; i++) { fld->bnd1[i] = 0.0f; fld->bnd2[i] = 0.0f; } for (int it = 0; it < fld->tbonds; it++) { fscanf (fin, "%d %f %f", &iaux, &fld->bnd1[it], &fld->bnd2[it]); fld->bnd1[it] *= 2.0f; // } // Angle types fscanf (fin, "%s %d", saux, &fld->tangle); // bond types int nag = NUM_BOND_TYPES; fld->ang1 = (float *)malloc( nag * sizeof(float) ); fld->ang2 = (float *)malloc( nag * sizeof(float) ); for (int i = 0; i < nag*nag; i++) { fld->ang1[i] = 0.0f; fld->ang2[i] = 0.0f; } for (int it = 0; it < fld->tangle; it++) { fscanf (fin, "%d %f %f", &iaux, &fld->ang1[it], &fld->ang2[it]); fld->ang1[it] *= 2.0f; // fld->ang2[it] *= M_Pi/180.0f; // deg2rad } // Bond list fscanf (fin, "%s %d", saux, &fld->nbonds); fld->iatn = (uint4 *)malloc( fld->nbonds * sizeof(uint4) ); for (int it = 0; it < fld->nbonds; it++) { fscanf (fin, "%d %d %d %d", &iaux, &fld->iatn[it].w, &fld->iatn[it].x, &fld->iatn[it].y); } // Angle list fscanf (fin, "%s %d", saux, &fld->nangle); fld->iang = (uint4 *)malloc( fld->nangle * sizeof(uint4) ); for (int it = 0; it < fld->nangle; it++) { fscanf (fin, "%d %d %d %d %d", &iaux, &fld->iang[it].w, &fld->iang[it].x, &fld->iang[it].y, &fld->iang[it].z); } free(saux); fclose(fin); return(fld); } void FFieldFinish(ffield* fld) { free(fld->nt); free(fld->name); free(fld->mass); free(fld->charge); free(fld->lj1); free(fld->lj2); } // ------ Configuration ------ config* ConfigInput(char *name, float *mass) { int i, iaux, cftyp; FILE *fin; config* conf; conf = (config *)malloc(sizeof(config)); fin = fopen(strcat(name, ".cfg") , "r"); fscanf (fin, "%d %d", &conf->natom, &cftyp); fscanf (fin, "%f %f %f", &conf->box.x, &conf->box.y, &conf->box.z); conf->boxi.x = 1.0/conf->box.x; conf->boxi.y = 1.0/conf->box.y; conf->boxi.z = 1.0/conf->box.z; // allocate configuration arrays conf->itype = (int *)malloc( conf->natom * sizeof(int) ); conf->pos = (float4 *)malloc( conf->natom * 
sizeof(float4) ); conf->vel = (float4 *)malloc( conf->natom * sizeof(float4) ); conf->force = (float4 *)malloc( conf->natom * sizeof(float4) ); for (i = 0; i < conf->natom; i++) { conf->pos[i].x = 0.0; conf->pos[i].y = 0.0; conf->pos[i].z = 0.0; conf->pos[i].w = 0.0; conf->vel[i].x = 0.0; conf->vel[i].y = 0.0; conf->vel[i].z = 0.0; conf->vel[i].w = 0.0; conf->force[i].x = 0.0; conf->force[i].y = 0.0; conf->force[i].z = 0.0; conf->force[i].w = 0.0; } for (i = 0; i < conf->natom; i++) { fscanf (fin, "%d %d %f", &iaux, &conf->itype[i], &conf->pos[i].w); conf->itype[i] = conf->itype[i] - 1; conf->vel[i].w = mass[conf->itype[i]]; fscanf (fin, "%f %f %f", &conf->pos[i].x, &conf->pos[i].y, &conf->pos[i].z); if (cftyp > 0) fscanf (fin, "%f %f %f", &conf->vel[i].x, &conf->vel[i].y, &conf->vel[i].z); } fclose(fin); return(conf); } void ConfigFinish(config* conf, ffield* fld, char *name) { FILE *fout; // write an xyz file fout = fopen(strcat(name, ".xyz") , "w"); fprintf (fout, "%d\n", conf->natom); fprintf (fout, "%f %f %f\n", conf->box.x, conf->box.y, conf->box.z); for (int i = 0; i < conf->natom; i++) fprintf (fout, "%s %f %f %f\n", fld->name[conf->itype[i]], conf->pos[i].x, conf->pos[i].y, conf->pos[i].z); fclose(fout); // write a configuration file fout = fopen(strcat(name, ".cfg") , "w"); fprintf (fout, "%d %d\n", conf->natom, 1); fprintf (fout, "%f %f %f\n", conf->box.x, conf->box.y, conf->box.z); for (int i = 0; i < conf->natom; i++) { fprintf (fout, "%d %d %f\n", i+1, conf->itype[i]+1, conf->pos[i].w); fprintf (fout, "%f %f %f\n", conf->pos[i].x, conf->pos[i].y, conf->pos[i].z); fprintf (fout, "%f %f %f\n", conf->vel[i].x, conf->vel[i].y, conf->vel[i].z); } fclose(fout); free(conf->itype); free(conf->pos); free(conf->force); } /* end of simulation.cpp */
1ed960ca1af75b61e1c13863d4378d4a080b320e.cu
/* File name: simulation.cpp Date: 2009/04/01 02:05 Author: Aaron Thompson and Lukas Vlcek Copyright (C) 2009 Aaron Thompson and Lukas Vlcek This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License in a file called COPYING along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ // Very ugly, should be reorganized #include <string> #include <iostream> #include <fstream> #include <math.h> #include <time.h> #include <stdlib.h> #include <stdio.h> #include <vector_types.h> #include "runGPU.h" #include "simulation.h" using namespace std; simulation *SimulationInit(char *name_sim) { FILE *fin; char *saux; simulation* sim; sim = (simulation *)malloc(sizeof(simulation)); sim->simname = (char *)malloc(LINE_LENGTH); strcpy(sim->simname, name_sim); params* pars; pars = (params *)malloc(sizeof(params)); integr* integ; integ = (integr *)malloc(sizeof(integr)); // read data from .inp fin = fopen(strcat(name_sim, ".inp") , "r"); saux = (char *) malloc(LINE_LENGTH); // ensemble integ->ensemble = (char *)malloc(3); integ->enstype = 0; fscanf (fin, "%s %s", saux, integ->ensemble); if (strcmp(integ->ensemble, "nvt") == 0) { integ->enstype = 1; fscanf (fin, "%s %f", saux, &integ->temper_0); } if (strcmp(integ->ensemble, "npt") == 0) { integ->enstype = 2; fscanf (fin, "%s %f %f", saux, &integ->temper_0, &integ->press_0); } // time and time steps fscanf (fin, "%s %d %d %d %f", saux, &pars->timeq, &pars->timrun, &pars->timprn, &integ->dt); fscanf (fin, 
"%s %f %d", saux, &pars->rcutsq, &pars->lkspace); // force & energy calcuation parameters if (pars->lkspace == 0) // 0 ... reaction field { float eps; fscanf (fin,"%s %f", saux, &eps); pars->reacf = 2.0f*(eps - 1.0f)/(2.0f*eps + 1.0f)/pow(pars->rcutsq, 3); } else if (pars->lkspace == 1) { // 1 ... Wolf fscanf (fin,"%s %f", saux, &pars->alfa); } else //2 ... Ewald { fscanf (fin,"%s %d", saux, &pars->kmax); pars->alfa = 3.09/pars->rcutsq; // one way to estimate good alpha (?) } pars->rcutsq *= pars->rcutsq; // square of cutoff distance // neighbor list skin fscanf (fin, "%s %f %d", saux, &pars->skin, &pars->nmaxlist); // PDF bin size fscanf (fin, "%s %f", saux, &pars->rdel); free(saux); fclose(fin); // read data from .fld strcpy(name_sim, sim->simname); ffield* fld; fld = (ffield *)malloc(sizeof(ffield)); fld = FFieldInput(name_sim); // read data from .cfg strcpy(name_sim, sim->simname); config* conf; conf = (config *)malloc(sizeof(config)); conf = ConfigInput(name_sim, fld->mass); sim->pars = pars; sim->integ = integ; sim->fld = fld; sim->conf = conf; return(sim); } void SimulationRun(simulation* sim) { time_t start, end; printf( "About to run simulation on GPU ...\n" ); time(&start); SimulationRun_CUDA(sim->pars, sim->integ, sim->fld, sim->conf); time(&end); printf( "Finished simulation.\n Total time: %f s.\n", difftime(end, start)); } void SimulationFinish(simulation* sim) { FFieldFinish(sim->fld); ConfigFinish(sim->conf, sim->fld, sim->simname); } // ------ Force field ------ ffield* FFieldInput(char *name) { int i, j, iaux, iatom; float faux1, faux2; char *saux; FILE *fin; ffield* fld; fld = (ffield *)malloc(sizeof(ffield)); fin = fopen(strcat(name, ".fld") , "r"); saux = (char *) malloc(LINE_LENGTH); fscanf (fin, "%s %d", saux, &fld->ntype); // number of atom types int nty = NUM_BOND_TYPES; // allocate atom type and ffield structures saux = (char *) malloc(LINE_LENGTH); fld->nt = (int *)malloc( nty * sizeof(int) ); fld->name = (char **)malloc( nty * sizeof(char 
*) ); for (int i = 0; i < fld->ntype; i++) fld->name[i] = (char *) malloc(2); fld->mass = (float *)malloc( nty * sizeof(float) ); fld->charge = (float *)malloc( nty * sizeof(float) ); fld->lj1 = (float *) malloc ( nty*nty * sizeof(float) ); fld->lj2 = (float *) malloc ( nty*nty * sizeof(float) ); for (int i = 0; i < nty*nty; i++) { fld->lj1[i] = 0.0f; fld->lj2[i] = 0.0f; } // atomic parameters: number, name, mass, charge iatom = 0; for (int it = 0; it < fld->ntype; it++) { fscanf (fin, "%d %d", &iaux, &fld->nt[it]); fscanf (fin, "%s %f %f", fld->name[it], &fld->mass[it], &fld->charge[it]); fld->mass[it] *= 10000.0; // g/mol*(A/fs)**2 -> kJ/mol*(m/s) iatom += fld->nt[it]; } // VdW int nvdw; fscanf (fin, "%s %d", saux, &nvdw); for (int it = 0; it < nvdw; it++) { fscanf (fin, "%d %d %s %f %f", &i, &j, saux, &faux1, &faux2); i--; j--; if (strcmp(saux, "LJ") == 0) // LJ form { // faux2 *= PH_kB*PH_Na/1000.0f; // K->kJ/mol if needed // for forces calculation - 12 and 6 factors included fld->lj1[i*fld->ntype + j] = 12.0f*4.0f*faux2*pow(faux1, 12); fld->lj1[j*fld->ntype + i] = fld->lj1[i*fld->ntype + j]; fld->lj2[i*fld->ntype + j] = 6.0f*4.0f*faux2*pow(faux1, 6); fld->lj2[j*fld->ntype + i] = fld->lj2[i*fld->ntype + j]; } else if (strcmp(saux, "12-6") == 0) // 12-6 form { // for forces calculation - 12 and 6 factors included fld->lj1[i*fld->ntype + j] = 12.0f*faux1; fld->lj1[j*fld->ntype + i] = fld->lj1[i*fld->ntype + j]; fld->lj2[i*fld->ntype + j] = 6.0f*faux2; fld->lj2[j*fld->ntype + i] = fld->lj2[i*fld->ntype + j]; } } // Fixed atoms // fscanf (fin, "%s %d", saux, &fld->nfixed); // for (int it = 0; it < fld->nfixed; it++) { ; } // Rigid molecules (just water) // fscanf (fin, "%s %d", saux, &fld->nrigid); // for (int it = 0; it < fld->nrigid; it++) { ; } // Bond types fscanf (fin, "%s %d", saux, &fld->tbonds); // bond types int nbo = NUM_BOND_TYPES; fld->bnd1 = (float *)malloc( nbo * sizeof(float) ); fld->bnd2 = (float *)malloc( nbo * sizeof(float) ); for (int i = 0; i < 
nbo*nbo; i++) { fld->bnd1[i] = 0.0f; fld->bnd2[i] = 0.0f; } for (int it = 0; it < fld->tbonds; it++) { fscanf (fin, "%d %f %f", &iaux, &fld->bnd1[it], &fld->bnd2[it]); fld->bnd1[it] *= 2.0f; // } // Angle types fscanf (fin, "%s %d", saux, &fld->tangle); // bond types int nag = NUM_BOND_TYPES; fld->ang1 = (float *)malloc( nag * sizeof(float) ); fld->ang2 = (float *)malloc( nag * sizeof(float) ); for (int i = 0; i < nag*nag; i++) { fld->ang1[i] = 0.0f; fld->ang2[i] = 0.0f; } for (int it = 0; it < fld->tangle; it++) { fscanf (fin, "%d %f %f", &iaux, &fld->ang1[it], &fld->ang2[it]); fld->ang1[it] *= 2.0f; // fld->ang2[it] *= M_Pi/180.0f; // deg2rad } // Bond list fscanf (fin, "%s %d", saux, &fld->nbonds); fld->iatn = (uint4 *)malloc( fld->nbonds * sizeof(uint4) ); for (int it = 0; it < fld->nbonds; it++) { fscanf (fin, "%d %d %d %d", &iaux, &fld->iatn[it].w, &fld->iatn[it].x, &fld->iatn[it].y); } // Angle list fscanf (fin, "%s %d", saux, &fld->nangle); fld->iang = (uint4 *)malloc( fld->nangle * sizeof(uint4) ); for (int it = 0; it < fld->nangle; it++) { fscanf (fin, "%d %d %d %d %d", &iaux, &fld->iang[it].w, &fld->iang[it].x, &fld->iang[it].y, &fld->iang[it].z); } free(saux); fclose(fin); return(fld); } void FFieldFinish(ffield* fld) { free(fld->nt); free(fld->name); free(fld->mass); free(fld->charge); free(fld->lj1); free(fld->lj2); } // ------ Configuration ------ config* ConfigInput(char *name, float *mass) { int i, iaux, cftyp; FILE *fin; config* conf; conf = (config *)malloc(sizeof(config)); fin = fopen(strcat(name, ".cfg") , "r"); fscanf (fin, "%d %d", &conf->natom, &cftyp); fscanf (fin, "%f %f %f", &conf->box.x, &conf->box.y, &conf->box.z); conf->boxi.x = 1.0/conf->box.x; conf->boxi.y = 1.0/conf->box.y; conf->boxi.z = 1.0/conf->box.z; // allocate configuration arrays conf->itype = (int *)malloc( conf->natom * sizeof(int) ); conf->pos = (float4 *)malloc( conf->natom * sizeof(float4) ); conf->vel = (float4 *)malloc( conf->natom * sizeof(float4) ); conf->force = 
(float4 *)malloc( conf->natom * sizeof(float4) ); for (i = 0; i < conf->natom; i++) { conf->pos[i].x = 0.0; conf->pos[i].y = 0.0; conf->pos[i].z = 0.0; conf->pos[i].w = 0.0; conf->vel[i].x = 0.0; conf->vel[i].y = 0.0; conf->vel[i].z = 0.0; conf->vel[i].w = 0.0; conf->force[i].x = 0.0; conf->force[i].y = 0.0; conf->force[i].z = 0.0; conf->force[i].w = 0.0; } for (i = 0; i < conf->natom; i++) { fscanf (fin, "%d %d %f", &iaux, &conf->itype[i], &conf->pos[i].w); conf->itype[i] = conf->itype[i] - 1; conf->vel[i].w = mass[conf->itype[i]]; fscanf (fin, "%f %f %f", &conf->pos[i].x, &conf->pos[i].y, &conf->pos[i].z); if (cftyp > 0) fscanf (fin, "%f %f %f", &conf->vel[i].x, &conf->vel[i].y, &conf->vel[i].z); } fclose(fin); return(conf); } void ConfigFinish(config* conf, ffield* fld, char *name) { FILE *fout; // write an xyz file fout = fopen(strcat(name, ".xyz") , "w"); fprintf (fout, "%d\n", conf->natom); fprintf (fout, "%f %f %f\n", conf->box.x, conf->box.y, conf->box.z); for (int i = 0; i < conf->natom; i++) fprintf (fout, "%s %f %f %f\n", fld->name[conf->itype[i]], conf->pos[i].x, conf->pos[i].y, conf->pos[i].z); fclose(fout); // write a configuration file fout = fopen(strcat(name, ".cfg") , "w"); fprintf (fout, "%d %d\n", conf->natom, 1); fprintf (fout, "%f %f %f\n", conf->box.x, conf->box.y, conf->box.z); for (int i = 0; i < conf->natom; i++) { fprintf (fout, "%d %d %f\n", i+1, conf->itype[i]+1, conf->pos[i].w); fprintf (fout, "%f %f %f\n", conf->pos[i].x, conf->pos[i].y, conf->pos[i].z); fprintf (fout, "%f %f %f\n", conf->vel[i].x, conf->vel[i].y, conf->vel[i].z); } fclose(fout); free(conf->itype); free(conf->pos); free(conf->force); } /* end of simulation.cpp */
abcec60289b3b7ae7f0fac88bd257e492cb94448.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void orcu_kernel2018(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) { const int tid=blockIdx.x*blockDim.x+threadIdx.x; const int gsize=gridDim.x*blockDim.x; double ysum; int j, k, col, row; for (int i=tid; i<=nrows-1; i+=gsize) { { ysum=0.0; for (j=0; j<=ndiags-1; j++ ) { row=i+j*sbdiag; col=(floor((float)i/ndofs)+offsets[j])*ndofs; if (col>=0&&col<nrows) for (k=0; k<=ndofs-1; k++ ) ysum=ysum+A[row+k*nrows]*x[col+k]; } y[i]=ysum; } } } void MatMult_SeqDIA(double* A, double* x, double* y, int M, int N, int P, int NOS, int DOF) { register int i,j,k; int col,row; double ysum; /*@ begin PerfTuning ( def performance_params { param TC[] = range(32,1025,32); param BC[] = range(14,105,14); param PL[] = [16,32,48]; } def input_params { param M[] = [32]; param N[] = [32]; param P[] = [32]; param NOS = 7; param DOF[] = range(1,17); constraint c1 = (M==N); constraint c2 = (N==P); } def input_vars { decl dynamic double A[M*N*P*DOF*DOF*NOS] = random; decl dynamic double x[M*N*P*DOF] = random; decl dynamic double y[M*N*P*DOF] = 0; decl static int offsets[NOS] = {-M*N*DOF,-M*DOF,-DOF,0,DOF,M*DOF,M*N*DOF}; } ) @*/ /**-- (Generated by Orio) Best performance cost: [0.062176000000000002, 0.039744000000000002, 0.037887999999999998, 0.037311999999999998, 0.037791999999999999] Tuned for specific problem sizes: DOF = 1 M = 32 N = 32 NOS = 7 P = 32 Best performance parameters: BC = 56 PL = 16 TC = 352 --**/ int nrows=M*N*P*DOF; int ndiags=NOS; int ndofs=DOF; int sbdiag=M*N*P*DOF*DOF; /*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL) for(i=0; i<=nrows-1; i++){ ysum = 0.0; for(j=0; j<=ndiags-1; j++){ row = i+j*sbdiag; col = (floor((float)i/ndofs)+offsets[j])*ndofs; if(col>=0&&col<nrows) for(k=0; k<=ndofs-1; k++) ysum += A[row+k*nrows] * x[col+k]; } y[i] = ysum; } ) @*/ { hipDeviceSynchronize(); /*declare 
variables*/ double *dev_A, *dev_x, *dev_y; int *dev_offsets; int nthreads=352; /*calculate device dimensions*/ dim3 dimGrid, dimBlock; dimBlock.x=nthreads; dimGrid.x=56; /*allocate device memory*/ hipMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double)); hipMalloc(&dev_x,M *N *P *DOF*sizeof(double)); hipMalloc(&dev_y,M *N *P *DOF*sizeof(double)); hipMalloc(&dev_offsets,NOS*sizeof(int)); hipDeviceSetCacheConfig(hipFuncCachePreferShared); /*copy data from host to device*/ hipEventRecord(tstart,0); hipMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_offsets,offsets,NOS*sizeof(int),hipMemcpyHostToDevice); hipEventRecord(tstop,0); hipEventSynchronize(tstop); hipEventElapsedTime(&orcu_transfer,tstart,tstop); hipEventRecord(start,0); /*invoke device kernel*/ hipLaunchKernelGGL(( orcu_kernel2018), dim3(dimGrid),dim3(dimBlock), 0, 0, nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&orcu_elapsed,start,stop); /*copy data from device to host*/ hipMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),hipMemcpyDeviceToHost); hipDeviceSetCacheConfig(hipFuncCachePreferNone); /*free allocated memory*/ hipFree(dev_A); hipFree(dev_x); hipFree(dev_y); hipFree(dev_offsets); hipError_t err=hipGetLastError(); if (hipSuccess!=err) printf("CUDA runtime error: %s@",hipGetErrorString(err)); } /*@ end @*/ /*@ end @*/ }
abcec60289b3b7ae7f0fac88bd257e492cb94448.cu
__global__ void orcu_kernel2018(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) { const int tid=blockIdx.x*blockDim.x+threadIdx.x; const int gsize=gridDim.x*blockDim.x; double ysum; int j, k, col, row; for (int i=tid; i<=nrows-1; i+=gsize) { { ysum=0.0; for (j=0; j<=ndiags-1; j++ ) { row=i+j*sbdiag; col=(floor((float)i/ndofs)+offsets[j])*ndofs; if (col>=0&&col<nrows) for (k=0; k<=ndofs-1; k++ ) ysum=ysum+A[row+k*nrows]*x[col+k]; } y[i]=ysum; } } } void MatMult_SeqDIA(double* A, double* x, double* y, int M, int N, int P, int NOS, int DOF) { register int i,j,k; int col,row; double ysum; /*@ begin PerfTuning ( def performance_params { param TC[] = range(32,1025,32); param BC[] = range(14,105,14); param PL[] = [16,32,48]; } def input_params { param M[] = [32]; param N[] = [32]; param P[] = [32]; param NOS = 7; param DOF[] = range(1,17); constraint c1 = (M==N); constraint c2 = (N==P); } def input_vars { decl dynamic double A[M*N*P*DOF*DOF*NOS] = random; decl dynamic double x[M*N*P*DOF] = random; decl dynamic double y[M*N*P*DOF] = 0; decl static int offsets[NOS] = {-M*N*DOF,-M*DOF,-DOF,0,DOF,M*DOF,M*N*DOF}; } ) @*/ /**-- (Generated by Orio) Best performance cost: [0.062176000000000002, 0.039744000000000002, 0.037887999999999998, 0.037311999999999998, 0.037791999999999999] Tuned for specific problem sizes: DOF = 1 M = 32 N = 32 NOS = 7 P = 32 Best performance parameters: BC = 56 PL = 16 TC = 352 --**/ int nrows=M*N*P*DOF; int ndiags=NOS; int ndofs=DOF; int sbdiag=M*N*P*DOF*DOF; /*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL) for(i=0; i<=nrows-1; i++){ ysum = 0.0; for(j=0; j<=ndiags-1; j++){ row = i+j*sbdiag; col = (floor((float)i/ndofs)+offsets[j])*ndofs; if(col>=0&&col<nrows) for(k=0; k<=ndofs-1; k++) ysum += A[row+k*nrows] * x[col+k]; } y[i] = ysum; } ) @*/ { cudaDeviceSynchronize(); /*declare variables*/ double *dev_A, *dev_x, *dev_y; int *dev_offsets; int nthreads=352; /*calculate 
device dimensions*/ dim3 dimGrid, dimBlock; dimBlock.x=nthreads; dimGrid.x=56; /*allocate device memory*/ cudaMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double)); cudaMalloc(&dev_x,M *N *P *DOF*sizeof(double)); cudaMalloc(&dev_y,M *N *P *DOF*sizeof(double)); cudaMalloc(&dev_offsets,NOS*sizeof(int)); cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); /*copy data from host to device*/ cudaEventRecord(tstart,0); cudaMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_offsets,offsets,NOS*sizeof(int),cudaMemcpyHostToDevice); cudaEventRecord(tstop,0); cudaEventSynchronize(tstop); cudaEventElapsedTime(&orcu_transfer,tstart,tstop); cudaEventRecord(start,0); /*invoke device kernel*/ orcu_kernel2018<<<dimGrid,dimBlock>>>(nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&orcu_elapsed,start,stop); /*copy data from device to host*/ cudaMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),cudaMemcpyDeviceToHost); cudaDeviceSetCacheConfig(cudaFuncCachePreferNone); /*free allocated memory*/ cudaFree(dev_A); cudaFree(dev_x); cudaFree(dev_y); cudaFree(dev_offsets); cudaError_t err=cudaGetLastError(); if (cudaSuccess!=err) printf("CUDA runtime error: %s@",cudaGetErrorString(err)); } /*@ end @*/ /*@ end @*/ }
7a7e7afd3f0bd26e0110b6fb742ca651369f72a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define BLOCKSIZE 128 #include <Meta/CUDA.h> #include <stdio.h> #include <stdlib.h> #include "Solid.h" #include "Shapes.h" #include "VboManager.h" #include "ColorRamp.h" #define crop_last_dim make_float3 // cross product inline __host__ __device__ float4 cross(float4 a, float4 b) { return make_float4(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x, 0.0); } __device__ float4 calcNormal(float4 *v0, float4 *v1, float4 *v2) { float4 edge0 = *v1 - *v0; float4 edge1 = *v2 - *v0; // note - it's faster to perform normalization in vertex shader rather than here return cross(edge1, edge0); } __global__ void applyTransformation_k(float4* modelVert, float4* mat, float4* vert, float4* modelNorm, float4* norm, unsigned int numVerts, unsigned int numThreads) { int me_idx = (numVerts*blockIdx.x) + threadIdx.x; if (threadIdx.x>=numVerts) return; int m_idx = 4*blockIdx.x; // Transform the model vertex vert[me_idx].x = dot(mat[m_idx + 0], modelVert[threadIdx.x]); vert[me_idx].y = dot(mat[m_idx + 1], modelVert[threadIdx.x]); vert[me_idx].z = dot(mat[m_idx + 2], modelVert[threadIdx.x]); vert[me_idx].w = dot(mat[m_idx + 3], modelVert[threadIdx.x]); // Transform the model normal norm[me_idx].x = dot(mat[m_idx + 0], modelNorm[threadIdx.x]); norm[me_idx].y = dot(mat[m_idx + 1], modelNorm[threadIdx.x]); norm[me_idx].z = dot(mat[m_idx + 2], modelNorm[threadIdx.x]); norm[me_idx].w = dot(mat[m_idx + 3], modelNorm[threadIdx.x]); } /** * This function applies the matrix transformation to each * vertex in the polygon model. Start as many threats as there are * vertices in the model. */ void applyTransformation(VisualBuffer& vert, VisualBuffer& norm) { unsigned int gridSize = vert.numElm; unsigned int numVerticesInModel = vert.numIndices / vert.numElm; // this is the number of indices in one model. //unsigned int blockSize = numVerticesInModel; // number of indices pr. 
elm (box=36) unsigned int blockSize = (int)ceil((float)numVerticesInModel/BLOCKSIZE) * BLOCKSIZE; //printf("Grid: %i block: %i - numThreads: %i - numVerts: %i\n", gridSize, blockSize, vb.numIndices, numVerticesInModel); //printf("modelAddr: %i - bufAddr: %i\n", vb.modelBuf, vb.buf); hipLaunchKernelGGL(( applyTransformation_k) , dim3(make_uint3(gridSize,1,1)), dim3(make_uint3(blockSize,1,1)), 0, 0, vert.modelVertBuf, vert.matBuf, vert.buf, vert.modelNormBuf, norm.buf, numVerticesInModel, vert.numIndices); CUT_CHECK_ERROR("Error applying transformations"); } __global__ void updateSurface_k(float4* vertBuf, float4* normBuf, Surface surface, Point *points, float4 *displacements) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; if (me_idx>=surface.numFaces) return; Triangle triangle = surface.faces[me_idx]; float4 pos, pos2, pos3, displacement; pos = points[triangle.x]; displacement = displacements[triangle.x]; pos.x += displacement.x; pos.y += displacement.y; pos.z += displacement.z; vertBuf[(me_idx*3)+0] = pos; pos2 = points[triangle.y]; displacement = displacements[triangle.y]; pos2.x += displacement.x; pos2.y += displacement.y; pos2.z += displacement.z; vertBuf[(me_idx*3)+1] = pos2; pos3 = points[triangle.z]; displacement = displacements[triangle.z]; pos3.x += displacement.x; pos3.y += displacement.y; pos3.z += displacement.z; vertBuf[(me_idx*3)+2] = pos3; float4 normal = calcNormal(&pos,&pos3,&pos2); normBuf[(3*me_idx)+0] = normal; normBuf[(3*me_idx)+1] = normal; normBuf[(3*me_idx)+2] = normal; } void updateSurface(Solid* solid, VboManager* vbom) { int gridSize = (int)ceil(((float)solid->surface->numFaces)/BLOCKSIZE); hipLaunchKernelGGL(( updateSurface_k), dim3(make_uint3(gridSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, vbom->GetBuf(SURFACE_VERTICES).buf, vbom->GetBuf(SURFACE_NORMALS).buf, *solid->surface, solid->vertexpool->data, solid->vertexpool->Ui_t); } __global__ void updateCenterOfMass_k(float4* buf, Body body, Point* points, float4* 
displacements) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; //if( me_idx!=654) return; // TEMP TEST if (me_idx>=body.numTetrahedra) return; Tetrahedron tetra = body.tetrahedra[me_idx]; float4 pos0, pos1, pos2, pos3; pos0 = points[tetra.x] + displacements[tetra.x]; pos1 = points[tetra.y] + displacements[tetra.y]; pos2 = points[tetra.z] + displacements[tetra.z]; pos3 = points[tetra.w] + displacements[tetra.w]; float4 center = (pos0 + pos1 + pos2 + pos3) / 4.0; buf[me_idx] = center; } void updateCenterOfMass(Solid* solid, VboManager* vbom) { int gridSize = (int)ceil(((float)solid->body->numTetrahedra)/BLOCKSIZE); hipLaunchKernelGGL(( updateCenterOfMass_k), dim3(make_uint3(gridSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, vbom->GetBuf(CENTER_OF_MASS).buf, *solid->body, solid->vertexpool->data, solid->vertexpool->Ui_t); } __global__ void updateBodyMesh_k(float4* vertBuf, float4* colrBuf, float4* normBuf, Body mesh, Point* points, float4* displacements, float minX) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; if (me_idx>=mesh.numTetrahedra) return; Tetrahedron tetra = mesh.tetrahedra[me_idx]; float4 a, b, c, d; a = points[tetra.x] + displacements[tetra.x]; b = points[tetra.y] + displacements[tetra.y]; c = points[tetra.z] + displacements[tetra.z]; d = points[tetra.w] + displacements[tetra.w]; // Jump index with 12 since there is 4 faces pr. tetrahedra each with 3 vertices. 
int norm_idx = me_idx*12; me_idx *= 12; // 0 2 3 vertBuf[me_idx++] = a; vertBuf[me_idx++] = b; vertBuf[me_idx++] = c; // 0 3 1 vertBuf[me_idx++] = a; vertBuf[me_idx++] = c; vertBuf[me_idx++] = d; // 0 1 2 vertBuf[me_idx++] = b; vertBuf[me_idx++] = d; vertBuf[me_idx++] = c; // 1 2 3 vertBuf[me_idx++] = a; vertBuf[me_idx++] = d; vertBuf[me_idx++] = b; // ----------- HARD NORMALS ------------- float4 normal = calcNormal(&a,&b,&c); normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; // Calculate hard normals normal = calcNormal(&a,&c,&d); normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; // Calculate hard normals normal = calcNormal(&b,&d,&c); normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; // Calculate hard normals normal = calcNormal(&a,&d,&b); normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; } void updateBodyMesh(Solid* solid, VboManager* vbom, float minX) { int gridSize = (int)ceil(((float)solid->body->numTetrahedra)/BLOCKSIZE); hipLaunchKernelGGL(( updateBodyMesh_k) , dim3(make_uint3(gridSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, vbom->GetBuf(BODY_MESH).buf, vbom->GetBuf(BODY_COLORS).buf, vbom->GetBuf(BODY_NORMALS).buf, *solid->body, solid->vertexpool->data, solid->vertexpool->Ui_t, minX); } __global__ void updateStressTensors_k(Body body, float4* matBuf, float4* norm, float4* com, float4* eigenVectors, float4* eigenValues) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; if (me_idx>=body.numTetrahedra) return; //if (me_idx>=1) return; float4 center = com[me_idx]; Matrix4f m(center); int e_idx = me_idx * 3; m.row0 = eigenVectors[e_idx+0]; m.row1 = eigenVectors[e_idx+1]; m.row2 = eigenVectors[e_idx+2]; m.SetPos(center.x, center.y, center.z); /* printf("{%f,%f,%f,%f - %f,%f,%f,%f - %f,%f,%f,%f - %f,%f,%f,%f}\n", m.row0.x, m.row0.y, m.row0.z, m.row0.w, m.row1.x, m.row1.y, m.row1.z, m.row1.w, 
m.row2.x, m.row2.y, m.row2.z, m.row2.w, m.row3.x, m.row3.y, m.row3.z, m.row3.w); */ m.CopyToBuf(matBuf, me_idx); } void updateStressTensors(Solid* solid, VboManager* vbom) { int gridSize = (int)ceil(((float)solid->body->numTetrahedra)/BLOCKSIZE); hipLaunchKernelGGL(( updateStressTensors_k) , dim3(make_uint3(gridSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, *solid->body, vbom->GetBuf(STRESS_TENSOR_VERTICES).matBuf, vbom->GetBuf(STRESS_TENSOR_NORMALS).buf, vbom->GetBuf(CENTER_OF_MASS).buf, vbom->GetBuf(EIGEN_VECTORS).buf, vbom->GetBuf(EIGEN_VALUES).buf); } __global__ void updateStressTensorNormals_k(Body body, float4* vert, float4* norm, float4* eigenValues) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; if (me_idx>=body.numTetrahedra) return; } void updateStressTensorNormals(Solid* solid, VboManager* vbom) { int gridSize = (int)ceil(((float)solid->body->numTetrahedra)/BLOCKSIZE); hipLaunchKernelGGL(( updateStressTensorNormals_k) , dim3(make_uint3(gridSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, *solid->body, vbom->GetBuf(STRESS_TENSOR_VERTICES).buf, vbom->GetBuf(STRESS_TENSOR_NORMALS).buf, vbom->GetBuf(EIGEN_VALUES).buf); } __global__ void planeClipping_k(Body body, Point* points, float4* displacements, float4* bodyMesh, float4* bodyColr, float4* com, float minX) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; if (me_idx>=body.numTetrahedra) return; Tetrahedron tetra = body.tetrahedra[me_idx]; float4 a, b, c, d; a = points[tetra.x] + displacements[tetra.x]; b = points[tetra.y] + displacements[tetra.y]; c = points[tetra.z] + displacements[tetra.z]; d = points[tetra.w] + displacements[tetra.w]; // Jump index with 12 since there is 4 faces pr. tetrahedra each with 3 vertices. 
int vert_idx = me_idx * 12; if ( a.x < minX || b.x < minX || c.x < minX || d.x < minX ) { for (unsigned int i=0; i<12; i++) { bodyMesh[vert_idx++] = make_float4(0.0,0.0,0.0,0.0); } com[me_idx] = make_float4(-1000.0,0.0,0.0,0.0); } else { for (unsigned int i=0; i<12; i++) { float dist = (minX - com[me_idx].x);// / 10.0f;//((minX - com[me_idx].x)); if( dist < 0 ) { dist *= -1.0f; } //if( dist > 1.0 ) dist = 1.0; bodyColr[vert_idx++].w = 2.0f / dist;//dist;//1.0 - dist; } } } void planeClipping(Solid* solid, VboManager* vbom, float minX) { int gridSize = (int)ceil(((float)solid->body->numTetrahedra)/BLOCKSIZE); hipLaunchKernelGGL(( planeClipping_k), dim3(make_uint3(gridSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, *solid->body, solid->vertexpool->data, solid->vertexpool->Ui_t, vbom->GetBuf(BODY_MESH).buf, vbom->GetBuf(BODY_COLORS).buf, vbom->GetBuf(CENTER_OF_MASS).buf, minX); CHECK_FOR_CUDA_ERROR(); }
7a7e7afd3f0bd26e0110b6fb742ca651369f72a7.cu
#define BLOCKSIZE 128 #include <Meta/CUDA.h> #include <stdio.h> #include <stdlib.h> #include "Solid.h" #include "Shapes.h" #include "VboManager.h" #include "ColorRamp.h" #define crop_last_dim make_float3 // cross product inline __host__ __device__ float4 cross(float4 a, float4 b) { return make_float4(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x, 0.0); } __device__ float4 calcNormal(float4 *v0, float4 *v1, float4 *v2) { float4 edge0 = *v1 - *v0; float4 edge1 = *v2 - *v0; // note - it's faster to perform normalization in vertex shader rather than here return cross(edge1, edge0); } __global__ void applyTransformation_k(float4* modelVert, float4* mat, float4* vert, float4* modelNorm, float4* norm, unsigned int numVerts, unsigned int numThreads) { int me_idx = (numVerts*blockIdx.x) + threadIdx.x; if (threadIdx.x>=numVerts) return; int m_idx = 4*blockIdx.x; // Transform the model vertex vert[me_idx].x = dot(mat[m_idx + 0], modelVert[threadIdx.x]); vert[me_idx].y = dot(mat[m_idx + 1], modelVert[threadIdx.x]); vert[me_idx].z = dot(mat[m_idx + 2], modelVert[threadIdx.x]); vert[me_idx].w = dot(mat[m_idx + 3], modelVert[threadIdx.x]); // Transform the model normal norm[me_idx].x = dot(mat[m_idx + 0], modelNorm[threadIdx.x]); norm[me_idx].y = dot(mat[m_idx + 1], modelNorm[threadIdx.x]); norm[me_idx].z = dot(mat[m_idx + 2], modelNorm[threadIdx.x]); norm[me_idx].w = dot(mat[m_idx + 3], modelNorm[threadIdx.x]); } /** * This function applies the matrix transformation to each * vertex in the polygon model. Start as many threats as there are * vertices in the model. */ void applyTransformation(VisualBuffer& vert, VisualBuffer& norm) { unsigned int gridSize = vert.numElm; unsigned int numVerticesInModel = vert.numIndices / vert.numElm; // this is the number of indices in one model. //unsigned int blockSize = numVerticesInModel; // number of indices pr. 
elm (box=36) unsigned int blockSize = (int)ceil((float)numVerticesInModel/BLOCKSIZE) * BLOCKSIZE; //printf("Grid: %i block: %i - numThreads: %i - numVerts: %i\n", gridSize, blockSize, vb.numIndices, numVerticesInModel); //printf("modelAddr: %i - bufAddr: %i\n", vb.modelBuf, vb.buf); applyTransformation_k <<<make_uint3(gridSize,1,1), make_uint3(blockSize,1,1)>>> (vert.modelVertBuf, vert.matBuf, vert.buf, vert.modelNormBuf, norm.buf, numVerticesInModel, vert.numIndices); CUT_CHECK_ERROR("Error applying transformations"); } __global__ void updateSurface_k(float4* vertBuf, float4* normBuf, Surface surface, Point *points, float4 *displacements) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; if (me_idx>=surface.numFaces) return; Triangle triangle = surface.faces[me_idx]; float4 pos, pos2, pos3, displacement; pos = points[triangle.x]; displacement = displacements[triangle.x]; pos.x += displacement.x; pos.y += displacement.y; pos.z += displacement.z; vertBuf[(me_idx*3)+0] = pos; pos2 = points[triangle.y]; displacement = displacements[triangle.y]; pos2.x += displacement.x; pos2.y += displacement.y; pos2.z += displacement.z; vertBuf[(me_idx*3)+1] = pos2; pos3 = points[triangle.z]; displacement = displacements[triangle.z]; pos3.x += displacement.x; pos3.y += displacement.y; pos3.z += displacement.z; vertBuf[(me_idx*3)+2] = pos3; float4 normal = calcNormal(&pos,&pos3,&pos2); normBuf[(3*me_idx)+0] = normal; normBuf[(3*me_idx)+1] = normal; normBuf[(3*me_idx)+2] = normal; } void updateSurface(Solid* solid, VboManager* vbom) { int gridSize = (int)ceil(((float)solid->surface->numFaces)/BLOCKSIZE); updateSurface_k<<<make_uint3(gridSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>(vbom->GetBuf(SURFACE_VERTICES).buf, vbom->GetBuf(SURFACE_NORMALS).buf, *solid->surface, solid->vertexpool->data, solid->vertexpool->Ui_t); } __global__ void updateCenterOfMass_k(float4* buf, Body body, Point* points, float4* displacements) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; //if( 
me_idx!=654) return; // TEMP TEST if (me_idx>=body.numTetrahedra) return; Tetrahedron tetra = body.tetrahedra[me_idx]; float4 pos0, pos1, pos2, pos3; pos0 = points[tetra.x] + displacements[tetra.x]; pos1 = points[tetra.y] + displacements[tetra.y]; pos2 = points[tetra.z] + displacements[tetra.z]; pos3 = points[tetra.w] + displacements[tetra.w]; float4 center = (pos0 + pos1 + pos2 + pos3) / 4.0; buf[me_idx] = center; } void updateCenterOfMass(Solid* solid, VboManager* vbom) { int gridSize = (int)ceil(((float)solid->body->numTetrahedra)/BLOCKSIZE); updateCenterOfMass_k<<<make_uint3(gridSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>(vbom->GetBuf(CENTER_OF_MASS).buf, *solid->body, solid->vertexpool->data, solid->vertexpool->Ui_t); } __global__ void updateBodyMesh_k(float4* vertBuf, float4* colrBuf, float4* normBuf, Body mesh, Point* points, float4* displacements, float minX) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; if (me_idx>=mesh.numTetrahedra) return; Tetrahedron tetra = mesh.tetrahedra[me_idx]; float4 a, b, c, d; a = points[tetra.x] + displacements[tetra.x]; b = points[tetra.y] + displacements[tetra.y]; c = points[tetra.z] + displacements[tetra.z]; d = points[tetra.w] + displacements[tetra.w]; // Jump index with 12 since there is 4 faces pr. tetrahedra each with 3 vertices. 
int norm_idx = me_idx*12; me_idx *= 12; // 0 2 3 vertBuf[me_idx++] = a; vertBuf[me_idx++] = b; vertBuf[me_idx++] = c; // 0 3 1 vertBuf[me_idx++] = a; vertBuf[me_idx++] = c; vertBuf[me_idx++] = d; // 0 1 2 vertBuf[me_idx++] = b; vertBuf[me_idx++] = d; vertBuf[me_idx++] = c; // 1 2 3 vertBuf[me_idx++] = a; vertBuf[me_idx++] = d; vertBuf[me_idx++] = b; // ----------- HARD NORMALS ------------- float4 normal = calcNormal(&a,&b,&c); normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; // Calculate hard normals normal = calcNormal(&a,&c,&d); normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; // Calculate hard normals normal = calcNormal(&b,&d,&c); normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; // Calculate hard normals normal = calcNormal(&a,&d,&b); normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; normBuf[norm_idx++] = normal; } void updateBodyMesh(Solid* solid, VboManager* vbom, float minX) { int gridSize = (int)ceil(((float)solid->body->numTetrahedra)/BLOCKSIZE); updateBodyMesh_k <<<make_uint3(gridSize,1,1), make_uint3(BLOCKSIZE,1,1)>>> (vbom->GetBuf(BODY_MESH).buf, vbom->GetBuf(BODY_COLORS).buf, vbom->GetBuf(BODY_NORMALS).buf, *solid->body, solid->vertexpool->data, solid->vertexpool->Ui_t, minX); } __global__ void updateStressTensors_k(Body body, float4* matBuf, float4* norm, float4* com, float4* eigenVectors, float4* eigenValues) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; if (me_idx>=body.numTetrahedra) return; //if (me_idx>=1) return; float4 center = com[me_idx]; Matrix4f m(center); int e_idx = me_idx * 3; m.row0 = eigenVectors[e_idx+0]; m.row1 = eigenVectors[e_idx+1]; m.row2 = eigenVectors[e_idx+2]; m.SetPos(center.x, center.y, center.z); /* printf("{%f,%f,%f,%f - %f,%f,%f,%f - %f,%f,%f,%f - %f,%f,%f,%f}\n", m.row0.x, m.row0.y, m.row0.z, m.row0.w, m.row1.x, m.row1.y, m.row1.z, m.row1.w, m.row2.x, m.row2.y, m.row2.z, m.row2.w, 
m.row3.x, m.row3.y, m.row3.z, m.row3.w); */ m.CopyToBuf(matBuf, me_idx); } void updateStressTensors(Solid* solid, VboManager* vbom) { int gridSize = (int)ceil(((float)solid->body->numTetrahedra)/BLOCKSIZE); updateStressTensors_k <<<make_uint3(gridSize,1,1), make_uint3(BLOCKSIZE,1,1)>>> (*solid->body, vbom->GetBuf(STRESS_TENSOR_VERTICES).matBuf, vbom->GetBuf(STRESS_TENSOR_NORMALS).buf, vbom->GetBuf(CENTER_OF_MASS).buf, vbom->GetBuf(EIGEN_VECTORS).buf, vbom->GetBuf(EIGEN_VALUES).buf); } __global__ void updateStressTensorNormals_k(Body body, float4* vert, float4* norm, float4* eigenValues) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; if (me_idx>=body.numTetrahedra) return; } void updateStressTensorNormals(Solid* solid, VboManager* vbom) { int gridSize = (int)ceil(((float)solid->body->numTetrahedra)/BLOCKSIZE); updateStressTensorNormals_k <<<make_uint3(gridSize,1,1), make_uint3(BLOCKSIZE,1,1)>>> (*solid->body, vbom->GetBuf(STRESS_TENSOR_VERTICES).buf, vbom->GetBuf(STRESS_TENSOR_NORMALS).buf, vbom->GetBuf(EIGEN_VALUES).buf); } __global__ void planeClipping_k(Body body, Point* points, float4* displacements, float4* bodyMesh, float4* bodyColr, float4* com, float minX) { int me_idx = blockIdx.x * blockDim.x + threadIdx.x; if (me_idx>=body.numTetrahedra) return; Tetrahedron tetra = body.tetrahedra[me_idx]; float4 a, b, c, d; a = points[tetra.x] + displacements[tetra.x]; b = points[tetra.y] + displacements[tetra.y]; c = points[tetra.z] + displacements[tetra.z]; d = points[tetra.w] + displacements[tetra.w]; // Jump index with 12 since there is 4 faces pr. tetrahedra each with 3 vertices. 
int vert_idx = me_idx * 12; if ( a.x < minX || b.x < minX || c.x < minX || d.x < minX ) { for (unsigned int i=0; i<12; i++) { bodyMesh[vert_idx++] = make_float4(0.0,0.0,0.0,0.0); } com[me_idx] = make_float4(-1000.0,0.0,0.0,0.0); } else { for (unsigned int i=0; i<12; i++) { float dist = (minX - com[me_idx].x);// / 10.0f;//((minX - com[me_idx].x)); if( dist < 0 ) { dist *= -1.0f; } //if( dist > 1.0 ) dist = 1.0; bodyColr[vert_idx++].w = 2.0f / dist;//dist;//1.0 - dist; } } } void planeClipping(Solid* solid, VboManager* vbom, float minX) { int gridSize = (int)ceil(((float)solid->body->numTetrahedra)/BLOCKSIZE); planeClipping_k<<<make_uint3(gridSize,1,1), make_uint3(BLOCKSIZE,1,1)>>> (*solid->body, solid->vertexpool->data, solid->vertexpool->Ui_t, vbom->GetBuf(BODY_MESH).buf, vbom->GetBuf(BODY_COLORS).buf, vbom->GetBuf(CENTER_OF_MASS).buf, minX); CHECK_FOR_CUDA_ERROR(); }
1c0359ee55a58fa7b114506be2f429453b72a467.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // // This sample demonstrates dynamic global memory allocation through device C++ new and delete operators and virtual function declarations available with CUDA 4.0. #include <stdio.h> #include <cutil_inline.h> #include <shrUtils.h> #include <shrQATest.h> #include <stdlib.h> #include <vector> #include <algorithm> const char *sSDKsample = "newdelete"; #include "container.hpp" ///////////////////////////////////////////////////////////////////////////// // // Kernels to allocate and instanciate Container objects on the device heap // //////////////////////////////////////////////////////////////////////////// __global__ void stackCreate(Container<int>** g_container) { // StackAtomicPop object and the data storage are allocated in device heap memory. // This makes it persistent for the lifetime of the CUDA context. // The grid has only one thread as only a single object instance is needed. *g_container = new Stack<int>(); } __global__ void vectorCreate(Container<int>** g_container, int max_size) { // The Vector object and the data storage are allocated in device heap memory. // This makes it persistent for the lifetime of the CUDA context. // The grid has only one thread as only a single object instance is needed. *g_container = new Vector<int>(max_size); } ///////////////////////////////////////////////////////////////////////////// // // Kernels to fill and consume shared Container objects. 
// //////////////////////////////////////////////////////////////////////////// __global__ void containerFill(Container<int>** g_container ) { // All threads of the grid cooperatively populate the shared Container object with data. if( threadIdx.x == 0 ) (*g_container)->push(blockIdx.x); } __global__ void containerConsume(Container<int>** g_container, int* d_result) { // All threads of the grid cooperatively consume the data from the shared Container object. int idx = blockIdx.x * blockDim.x + threadIdx.x; int v; if( (*g_container)->pop(v) ) d_result[idx] = v; else d_result[idx] = -1; } ///////////////////////////////////////////////////////////////////////////// // // Kernel to delete shared Container objects. // //////////////////////////////////////////////////////////////////////////// __global__ void containerDelete(Container<int>** g_container) { delete *g_container; } /////////////////////////////////////////////////////////////////////////////////////////// // // Kernels to using of placement new to put shared Vector objects and data in shared memory // /////////////////////////////////////////////////////////////////////////////////////////// __global__ void placementNew(int* d_result) { __shared__ unsigned char __align__(8) s_buffer[sizeof(Vector<int>)]; __shared__ int __align__(8) s_data[1024]; __shared__ Vector<int>* s_vector; // The first thread of the block initializes the shared Vector object. // The placement new operator enables the Vector object and the data array top be placed in shared memory. 
if( threadIdx.x == 0) { s_vector = new (s_buffer) Vector<int>(1024, s_data); } __syncthreads(); if( (threadIdx.x & 1) == 0 ) s_vector->push(threadIdx.x >> 1); __syncthreads(); int v; if( s_vector->pop(v) ) d_result[threadIdx.x] = v; else d_result[threadIdx.x] = -1; // Note: deleting objects placed in shared memory is not necessary (lifetime of shared memory is that of the block) } struct ComplexType_t { int a; int b; float c; float d; }; __global__ void complexVector(int* d_result) { __shared__ unsigned char __align__(8) s_buffer[sizeof(Vector<ComplexType_t>)]; __shared__ ComplexType_t __align__(8) s_data[1024]; __shared__ Vector<ComplexType_t>* s_vector; // The first thread of the block initializes the shared Vector object. // The placement new operator enables the Vector object and the data array top be placed in shared memory. if( threadIdx.x == 0) s_vector = new (s_buffer) Vector<ComplexType_t>(1024, s_data); __syncthreads(); if( (threadIdx.x & 1) == 0 ) { ComplexType_t data; data.a = threadIdx.x >> 1; data.b = blockIdx.x; data.c = threadIdx.x / (float)(blockDim.x); data.d = blockIdx.x / (float)(gridDim.x); s_vector->push(data); } __syncthreads(); ComplexType_t v; if( s_vector->pop(v) ) { d_result[threadIdx.x] = v.a; } else { d_result[threadIdx.x] = -1; } // Note: deleting objects placed in shared memory is not necessary (lifetime of shared memory is that of the block) } /////////////////////////////////////////////////////////////////////////////////////////// // // Host code // /////////////////////////////////////////////////////////////////////////////////////////// bool checkResult(int* d_result, int N) { std::vector<int> h_result; h_result.resize(N); cutilSafeCall( hipMemcpy(&h_result[0], d_result, N*sizeof(int), hipMemcpyDeviceToHost ) ); std::sort(h_result.begin(), h_result.end()); bool success = true; bool test = false; int value=0; for( int i=0; i < N; ++i ) { if( h_result[i] != -1 ) test = true; if( test && (value++) != h_result[i] ) success = false; 
} return success; } bool testContainer(Container<int>** d_container, int blocks, int threads) { int* d_result; hipMalloc( &d_result, blocks*threads*sizeof(int)); hipLaunchKernelGGL(( containerFill), dim3(blocks),dim3(threads), 0, 0, d_container); hipLaunchKernelGGL(( containerConsume), dim3(blocks),dim3(threads), 0, 0, d_container, d_result); hipLaunchKernelGGL(( containerDelete), dim3(1),dim3(1), 0, 0, d_container); cutilSafeCall( cutilDeviceSynchronize() ); bool success = checkResult(d_result, blocks*threads); hipFree( d_result ); return success; } bool testPlacementNew(int threads) { int* d_result; hipMalloc( &d_result, threads*sizeof(int)); hipLaunchKernelGGL(( placementNew), dim3(1), dim3(threads), 0, 0, d_result); cutilSafeCall( cutilDeviceSynchronize() ); bool success = checkResult(d_result, threads); hipFree( d_result ); return success; } bool testComplexType(int threads) { int* d_result; hipMalloc( &d_result, threads*sizeof(int)); hipLaunchKernelGGL(( complexVector), dim3(1), dim3(threads), 0, 0, d_result); cutilSafeCall( cutilDeviceSynchronize() ); bool success = checkResult(d_result, threads); hipFree( d_result ); return success; } /////////////////////////////////////////////////////////////////////////////////////////// // // MAIN // /////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { int cuda_device = 0; shrQAStart(argc, argv); // use command-line specified CUDA device, otherwise use device with highest Gflops/s cuda_device = cutilChooseCudaDevice(argc, argv); hipDeviceProp_t deviceProp; cutilSafeCall( hipGetDevice(&cuda_device)); cutilSafeCall( hipGetDeviceProperties(&deviceProp, cuda_device) ); if( deviceProp.major < 2 ) { shrLog("> This GPU with Compute Capability %d.%d does not meet minimum requirements.\n", deviceProp.major, deviceProp.minor); shrLog("> A GPU with Compute Capability >= 2.0 is required.\n", sSDKsample); shrLog("> Test will not run. 
Exiting.\n"); shrQAFinishExit(argc, (const char**)argv, QA_PASSED); } // set the heap size for deviuce size new/delete to 128 MB #if CUDART_VERSION >= 4000 hipDeviceSetLimit(hipLimitMallocHeapSize, 128 * (1 << 20)); #else hipThreadSetLimit(hipLimitMallocHeapSize, 128 * (1 << 20)); #endif Container<int>** d_container; hipMalloc( &d_container, sizeof(Container<int>**) ); bool bTest = false; int test_passed = 0; shrLog(" > Container = StackAtomicPop test "); hipLaunchKernelGGL(( stackCreate), dim3(1),dim3(1), 0, 0, d_container); bTest = testContainer(d_container, 1024, 128); shrLog(bTest ? "OK\n\n" : "NOT OK\n\n"); test_passed += (bTest ? 1 : 0); shrLog(" > Container = Vector test "); hipLaunchKernelGGL(( vectorCreate), dim3(1),dim3(1), 0, 0, d_container, 1024 * 128); bTest = testContainer(d_container, 1024, 128); shrLog(bTest ? "OK\n\n" : "NOT OK\n\n"); test_passed += (bTest ? 1 : 0); hipFree( d_container ); shrLog(" > Container = Vector, using placement new on SMEM buffer test "); bTest = testPlacementNew(1024); shrLog(bTest ? "OK\n\n" : "NOT OK\n\n"); test_passed += (bTest ? 1 : 0); shrLog(" > Container = Vector, with user defined datatype test "); bTest = testComplexType(1024); shrLog(bTest ? "OK\n\n" : "NOT OK\n\n"); test_passed += (bTest ? 1 : 0); shrLog("Test Summary: %d/4 succesfully run\n", test_passed); cutilDeviceReset(); shrQAFinishExit(argc, (const char**)argv, (test_passed==4) ? QA_PASSED : QA_FAILED); };
1c0359ee55a58fa7b114506be2f429453b72a467.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // // This sample demonstrates dynamic global memory allocation through device C++ new and delete operators and virtual function declarations available with CUDA 4.0. #include <stdio.h> #include <cutil_inline.h> #include <shrUtils.h> #include <shrQATest.h> #include <stdlib.h> #include <vector> #include <algorithm> const char *sSDKsample = "newdelete"; #include "container.hpp" ///////////////////////////////////////////////////////////////////////////// // // Kernels to allocate and instanciate Container objects on the device heap // //////////////////////////////////////////////////////////////////////////// __global__ void stackCreate(Container<int>** g_container) { // StackAtomicPop object and the data storage are allocated in device heap memory. // This makes it persistent for the lifetime of the CUDA context. // The grid has only one thread as only a single object instance is needed. *g_container = new Stack<int>(); } __global__ void vectorCreate(Container<int>** g_container, int max_size) { // The Vector object and the data storage are allocated in device heap memory. // This makes it persistent for the lifetime of the CUDA context. // The grid has only one thread as only a single object instance is needed. *g_container = new Vector<int>(max_size); } ///////////////////////////////////////////////////////////////////////////// // // Kernels to fill and consume shared Container objects. 
// //////////////////////////////////////////////////////////////////////////// __global__ void containerFill(Container<int>** g_container ) { // All threads of the grid cooperatively populate the shared Container object with data. if( threadIdx.x == 0 ) (*g_container)->push(blockIdx.x); } __global__ void containerConsume(Container<int>** g_container, int* d_result) { // All threads of the grid cooperatively consume the data from the shared Container object. int idx = blockIdx.x * blockDim.x + threadIdx.x; int v; if( (*g_container)->pop(v) ) d_result[idx] = v; else d_result[idx] = -1; } ///////////////////////////////////////////////////////////////////////////// // // Kernel to delete shared Container objects. // //////////////////////////////////////////////////////////////////////////// __global__ void containerDelete(Container<int>** g_container) { delete *g_container; } /////////////////////////////////////////////////////////////////////////////////////////// // // Kernels to using of placement new to put shared Vector objects and data in shared memory // /////////////////////////////////////////////////////////////////////////////////////////// __global__ void placementNew(int* d_result) { __shared__ unsigned char __align__(8) s_buffer[sizeof(Vector<int>)]; __shared__ int __align__(8) s_data[1024]; __shared__ Vector<int>* s_vector; // The first thread of the block initializes the shared Vector object. // The placement new operator enables the Vector object and the data array top be placed in shared memory. 
if( threadIdx.x == 0) { s_vector = new (s_buffer) Vector<int>(1024, s_data); } __syncthreads(); if( (threadIdx.x & 1) == 0 ) s_vector->push(threadIdx.x >> 1); __syncthreads(); int v; if( s_vector->pop(v) ) d_result[threadIdx.x] = v; else d_result[threadIdx.x] = -1; // Note: deleting objects placed in shared memory is not necessary (lifetime of shared memory is that of the block) } struct ComplexType_t { int a; int b; float c; float d; }; __global__ void complexVector(int* d_result) { __shared__ unsigned char __align__(8) s_buffer[sizeof(Vector<ComplexType_t>)]; __shared__ ComplexType_t __align__(8) s_data[1024]; __shared__ Vector<ComplexType_t>* s_vector; // The first thread of the block initializes the shared Vector object. // The placement new operator enables the Vector object and the data array top be placed in shared memory. if( threadIdx.x == 0) s_vector = new (s_buffer) Vector<ComplexType_t>(1024, s_data); __syncthreads(); if( (threadIdx.x & 1) == 0 ) { ComplexType_t data; data.a = threadIdx.x >> 1; data.b = blockIdx.x; data.c = threadIdx.x / (float)(blockDim.x); data.d = blockIdx.x / (float)(gridDim.x); s_vector->push(data); } __syncthreads(); ComplexType_t v; if( s_vector->pop(v) ) { d_result[threadIdx.x] = v.a; } else { d_result[threadIdx.x] = -1; } // Note: deleting objects placed in shared memory is not necessary (lifetime of shared memory is that of the block) } /////////////////////////////////////////////////////////////////////////////////////////// // // Host code // /////////////////////////////////////////////////////////////////////////////////////////// bool checkResult(int* d_result, int N) { std::vector<int> h_result; h_result.resize(N); cutilSafeCall( cudaMemcpy(&h_result[0], d_result, N*sizeof(int), cudaMemcpyDeviceToHost ) ); std::sort(h_result.begin(), h_result.end()); bool success = true; bool test = false; int value=0; for( int i=0; i < N; ++i ) { if( h_result[i] != -1 ) test = true; if( test && (value++) != h_result[i] ) success = 
false; } return success; } bool testContainer(Container<int>** d_container, int blocks, int threads) { int* d_result; cudaMalloc( &d_result, blocks*threads*sizeof(int)); containerFill<<<blocks,threads>>>(d_container); containerConsume<<<blocks,threads>>>(d_container, d_result); containerDelete<<<1,1>>>(d_container); cutilSafeCall( cutilDeviceSynchronize() ); bool success = checkResult(d_result, blocks*threads); cudaFree( d_result ); return success; } bool testPlacementNew(int threads) { int* d_result; cudaMalloc( &d_result, threads*sizeof(int)); placementNew<<<1, threads>>>(d_result); cutilSafeCall( cutilDeviceSynchronize() ); bool success = checkResult(d_result, threads); cudaFree( d_result ); return success; } bool testComplexType(int threads) { int* d_result; cudaMalloc( &d_result, threads*sizeof(int)); complexVector<<<1, threads>>>(d_result); cutilSafeCall( cutilDeviceSynchronize() ); bool success = checkResult(d_result, threads); cudaFree( d_result ); return success; } /////////////////////////////////////////////////////////////////////////////////////////// // // MAIN // /////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { int cuda_device = 0; shrQAStart(argc, argv); // use command-line specified CUDA device, otherwise use device with highest Gflops/s cuda_device = cutilChooseCudaDevice(argc, argv); cudaDeviceProp deviceProp; cutilSafeCall( cudaGetDevice(&cuda_device)); cutilSafeCall( cudaGetDeviceProperties(&deviceProp, cuda_device) ); if( deviceProp.major < 2 ) { shrLog("> This GPU with Compute Capability %d.%d does not meet minimum requirements.\n", deviceProp.major, deviceProp.minor); shrLog("> A GPU with Compute Capability >= 2.0 is required.\n", sSDKsample); shrLog("> Test will not run. 
Exiting.\n"); shrQAFinishExit(argc, (const char**)argv, QA_PASSED); } // set the heap size for deviuce size new/delete to 128 MB #if CUDART_VERSION >= 4000 cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128 * (1 << 20)); #else cudaThreadSetLimit(cudaLimitMallocHeapSize, 128 * (1 << 20)); #endif Container<int>** d_container; cudaMalloc( &d_container, sizeof(Container<int>**) ); bool bTest = false; int test_passed = 0; shrLog(" > Container = StackAtomicPop test "); stackCreate<<<1,1>>>(d_container); bTest = testContainer(d_container, 1024, 128); shrLog(bTest ? "OK\n\n" : "NOT OK\n\n"); test_passed += (bTest ? 1 : 0); shrLog(" > Container = Vector test "); vectorCreate<<<1,1>>>(d_container, 1024 * 128); bTest = testContainer(d_container, 1024, 128); shrLog(bTest ? "OK\n\n" : "NOT OK\n\n"); test_passed += (bTest ? 1 : 0); cudaFree( d_container ); shrLog(" > Container = Vector, using placement new on SMEM buffer test "); bTest = testPlacementNew(1024); shrLog(bTest ? "OK\n\n" : "NOT OK\n\n"); test_passed += (bTest ? 1 : 0); shrLog(" > Container = Vector, with user defined datatype test "); bTest = testComplexType(1024); shrLog(bTest ? "OK\n\n" : "NOT OK\n\n"); test_passed += (bTest ? 1 : 0); shrLog("Test Summary: %d/4 succesfully run\n", test_passed); cutilDeviceReset(); shrQAFinishExit(argc, (const char**)argv, (test_passed==4) ? QA_PASSED : QA_FAILED); };
1201a98066cf17028b6f464a79fc2e9cb0214797.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Program Name: CudaVectorAdd This program adds two vector arrays on GPU. */ #include <stdio.h> #define N 512 // Device Vector Add Function. __global__ void add(int *a, int *b, int *c) { // Using blocks only. int tid = threadIdx.x; c[tid] = a[tid] + b[tid]; } int main(){ int *a,*b,*c; // Host side pointers. int *dev_a, *dev_b, *dev_c; // Device side pointers. //Host side memory allocation. a=(int *)malloc(N*sizeof(int)); b=(int *)malloc(N*sizeof(int)); c=(int *)malloc(N*sizeof(int)); //Device side memory allocation. hipMalloc( (void**)&dev_a, N * sizeof(int) ); hipMalloc( (void**)&dev_b, N * sizeof(int) ); hipMalloc( (void**)&dev_c, N * sizeof(int) ); // Initializing Vectors for (int i=0; i<N; i++) { a[i] = i; b[i] = i; } //Copying data to the GPU. hipMemcpy ( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice ); hipMemcpy ( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice ); // GPU kernel launch with one block and N=512 blocks. hipLaunchKernelGGL(( add), dim3(1),dim3(N), 0, 0, dev_a, dev_b, dev_c); // Copying results back to the Host. hipMemcpy(c, dev_c, N * sizeof(int),hipMemcpyDeviceToHost ); //Printing results. for (int i=0; i<N; i++) { printf("%d + %d = %d\n", a[i],b[i],c[i]); } // Freeing memory to keep the atmosphere clean. free(a); free(b); free(c); hipFree (dev_a); hipFree (dev_b); hipFree (dev_c); return 0; }
1201a98066cf17028b6f464a79fc2e9cb0214797.cu
/* Program Name: CudaVectorAdd This program adds two vector arrays on GPU. */ #include <stdio.h> #define N 512 // Device Vector Add Function. __global__ void add(int *a, int *b, int *c) { // Using blocks only. int tid = threadIdx.x; c[tid] = a[tid] + b[tid]; } int main(){ int *a,*b,*c; // Host side pointers. int *dev_a, *dev_b, *dev_c; // Device side pointers. //Host side memory allocation. a=(int *)malloc(N*sizeof(int)); b=(int *)malloc(N*sizeof(int)); c=(int *)malloc(N*sizeof(int)); //Device side memory allocation. cudaMalloc( (void**)&dev_a, N * sizeof(int) ); cudaMalloc( (void**)&dev_b, N * sizeof(int) ); cudaMalloc( (void**)&dev_c, N * sizeof(int) ); // Initializing Vectors for (int i=0; i<N; i++) { a[i] = i; b[i] = i; } //Copying data to the GPU. cudaMemcpy ( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice ); cudaMemcpy ( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice ); // GPU kernel launch with one block and N=512 blocks. add<<<1,N>>>(dev_a, dev_b, dev_c); // Copying results back to the Host. cudaMemcpy(c, dev_c, N * sizeof(int),cudaMemcpyDeviceToHost ); //Printing results. for (int i=0; i<N; i++) { printf("%d + %d = %d\n", a[i],b[i],c[i]); } // Freeing memory to keep the atmosphere clean. free(a); free(b); free(c); cudaFree (dev_a); cudaFree (dev_b); cudaFree (dev_c); return 0; }
09537bca4fe2b6ca0ead4ec171936544acbe10fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pairwise_hist.cuh" #include "split_properties_helpers.cuh" #include "compute_pair_hist_loop.cuh" #include <hip/hip_cooperative_groups.h> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> using namespace cooperative_groups; namespace NKernel { //TODO(noxoomo): tune it template<bool IsFullPass> struct THalfBytePairwiseHistUnrollTrait { static constexpr int InnerUnroll() { #if __CUDA_ARCH__ <= 350 return 2; #elif __CUDA_ARCH__ < 700 return 2; #else return 8;//IsFullPass ? 4 : 8; #endif } static constexpr int OuterUnroll() { #if __CUDA_ARCH__ <= 350 return 4; #elif __CUDA_ARCH__ < 700 return 2; #else return 1; #endif } }; template<int BLOCK_SIZE, class TCmpBins = TCmpBinsWithoutOneHot> struct TPairHistHalfByte { TCmpBins CmpBinsFunc; float* Slice; __forceinline__ __device__ int SliceOffset() { const int warpOffset = 1024 * (threadIdx.x / 32); //we store 4 histograms per block // x8 feature and x4 histograms, though histStart = blockIdx * 16 return warpOffset + (threadIdx.x & 16); } __forceinline__ __device__ TPairHistHalfByte(float* buff, TCmpBins cmpBinsFunc) : CmpBinsFunc(cmpBinsFunc) { Slice = buff; for (int i = threadIdx.x; i < BLOCK_SIZE * 32; i += BLOCK_SIZE) { Slice[i] = 0; } Slice += SliceOffset(); __syncthreads(); } __forceinline__ __device__ void AddPair(const ui32 ci1, const ui32 ci2, const float w) { thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block()); const bool flag = threadIdx.x & 1; const int shift = threadIdx.x & 14; const ui32 bins1 = RotateRight(flag ? ci2 : ci1, 2 * shift); const ui32 bins2 = RotateRight(flag ? 
ci1 : ci2, 2 * shift); #pragma unroll for (int i = 0; i < 8; i++) { const int f = ((shift + 2 * i) & 14); const int bin1 = (bins1 >> (28 - 4 * i)) & 15; const int bin2 = (bins2 >> (28 - 4 * i)) & 15; const int tmp = (CmpBinsFunc.Compare(i, bin1, bin2, flag) ? 0 : 512) + f; const int offset1 = 32 * bin1 + tmp + flag; const int offset2 = 32 * bin2 + tmp + !flag; groupTile.sync(); Slice[offset1] += w; groupTile.sync(); Slice[offset2] += w; } } #if __CUDA_ARCH__ < 700 template <int N> __forceinline__ __device__ void AddPairs(const ui32* ci1, const ui32* ci2, const float* w) { #pragma unroll for (int k = 0; k < N; ++k) { AddPair(ci1[k], ci2[k], w[k]); } } #else template <int N> __forceinline__ __device__ void AddPairs(const ui32* ci1, const ui32* ci2, const float* w) { thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block()); const bool flag = threadIdx.x & 1; const int shift = threadIdx.x & 14; ui32 bins1[N]; ui32 bins2[N]; #pragma unroll for (int k = 0; k < N; ++k) { bins1[k] = RotateRight(flag ? ci2[k] : ci1[k], 2 * shift); bins2[k] = RotateRight(flag ? ci1[k] : ci2[k], 2 * shift); } #pragma unroll for (int i = 0; i < 8; i++) { const int f = ((shift + 2 * i) & 14); int bin1[N]; int bin2[N]; #pragma unroll for (int k = 0; k < N; ++k) { bin1[k] = (bins1[k] >> (28 - 4 * i)) & 15; bin2[k] = (bins2[k] >> (28 - 4 * i)) & 15; } int offset1[N]; int offset2[N]; #pragma unroll for (int k = 0; k < N; ++k) { const int tmp = (CmpBinsFunc.Compare(i, bin1[k], bin2[k], flag) ? 
0 : 512) + f; offset1[k] = 32 * bin1[k] + tmp + flag; offset2[k] = 32 * bin2[k] + tmp + !flag; } groupTile.sync(); #pragma unroll for (int k = 0; k < N; ++k) { Slice[offset1[k]] += w[k]; } groupTile.sync(); #pragma unroll for (int k = 0; k < N; ++k) { Slice[offset2[k]] += w[k]; } } } #endif __forceinline__ __device__ void Reduce() { Slice -= SliceOffset(); __syncthreads(); { const int warpHistSize = 1024; for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) { float sum = 0; #pragma unroll 12 for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) { sum += Slice[i]; } Slice[warpHistSize + start] = sum; } } __syncthreads(); const int maxFoldCount = 16; const int fold = (threadIdx.x >> 1) & 15; const int f = threadIdx.x / 32; if (threadIdx.x < 256) { float weightLeq = 0; float weightGe = 0; const bool isSecondBin = (threadIdx.x & 1); if (fold < maxFoldCount) { const volatile float* __restrict__ src = Slice + 1024 //warpHistSize + 32 * fold + 2 * f + isSecondBin; weightLeq = src[0] + src[16]; weightGe = src[512] + src[528]; Slice[4 * (maxFoldCount * f + fold) + isSecondBin] = weightLeq; Slice[4 * (maxFoldCount * f + fold) + 2 + isSecondBin] = weightGe; } } __syncthreads(); } }; template<int BLOCK_SIZE, int N, int OUTER_UNROLL, int BLOCKS_PER_FEATURE> __forceinline__ __device__ void ComputeSplitPropertiesHalfBytePass(const TCFeature* feature, int fCount, const uint* __restrict cindex, const uint2* __restrict pairs, const float* __restrict weight, const TDataPartition* partition, float* __restrict histogram, float* __restrict smem) { #define RUN_COMPUTE_HIST() \ ComputePairHistogram < BLOCK_SIZE, 1, N, OUTER_UNROLL, BLOCKS_PER_FEATURE, THist >(partition->Offset, cindex, partition->Size, pairs, weight, hist); if (HasOneHotFeatures(feature, fCount, reinterpret_cast<int*>(smem))) { using TCmpBins = TCmpBinsWithOneHot<8>; TCmpBins cmpBins(feature, fCount); using THist = TPairHistHalfByte<BLOCK_SIZE, TCmpBins>; THist hist(smem, cmpBins); 
RUN_COMPUTE_HIST(); } else { using THist = TPairHistHalfByte<BLOCK_SIZE>; THist hist(smem, TCmpBinsWithoutOneHot()); RUN_COMPUTE_HIST(); } #undef RUN_COMPUTE_HIST if (threadIdx.x < 256) { const int histId = threadIdx.x & 3; const int binId = (threadIdx.x >> 2) & 7; const int fid = (threadIdx.x >> 5) & 7; if (fid < fCount) { const ui32 bfStart = feature[fid].FirstFoldIndex; histogram += 4 * bfStart; #pragma unroll 2 for (int fold = binId; fold < feature[fid].Folds; fold += 8) { if (fold < feature[fid].Folds) { const int readOffset = 4 * (16 * fid + fold) + histId; if (BLOCKS_PER_FEATURE > 1) { atomicAdd(histogram + 4 * fold + histId, smem[readOffset]); } else { histogram[4 * fold + histId] += smem[readOffset]; } } } } } } #define DECLARE_PASS_HALF_BYTE(N, OUTER_UNROLL, M) \ ComputeSplitPropertiesHalfBytePass<BlockSize, N, OUTER_UNROLL, M>(feature, fCount, cindex, pairs, weight, partition, histogram, &localHist[0]); template<int BlockSize, bool IsFullPass, int M> #if __CUDA_ARCH__ >= 700 __launch_bounds__(BlockSize, 2) #else __launch_bounds__(BlockSize, 1) #endif __global__ void ComputeSplitPropertiesHalfBytePairs(const TCFeature* feature, int fCount, const ui32* cindex, const uint2* pairs, const float* weight, const TDataPartition* partition, int histLineSize, float* histogram) { //histogram line size - size of one part hist. 
const int featureOffset = (blockIdx.x / M) * 8; feature += featureOffset; cindex += feature->Offset; fCount = min(fCount - featureOffset, 8); if (IsFullPass) { partition += blockIdx.y; histogram += blockIdx.y * histLineSize * 4ULL; } else { const int depth = (int)log2((float)gridDim.y); int partId = GetPairwisePartIdToCalculate(partition); partition += partId; histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4ULL; } if (partition->Size == 0) { return; } __shared__ float localHist[32 * BlockSize]; DECLARE_PASS_HALF_BYTE(THalfBytePairwiseHistUnrollTrait<IsFullPass>::InnerUnroll(), THalfBytePairwiseHistUnrollTrait<IsFullPass>::OuterUnroll(), M) } void ComputePairwiseHistogramHalfByte(const TCFeature* features, const ui32 featureCount, const ui32 halfByteFeatureCount, const ui32* compressedIndex, const uint2* pairs, ui32 pairCount, const float* weight, const TDataPartition* partition, ui32 partCount, ui32 histLineSize, bool fullPass, float* histogram, TCudaStream stream) { assert(featureCount == halfByteFeatureCount); if (featureCount > 0) { const int blockSize = 384; dim3 numBlocks; numBlocks.x = (featureCount + 7) / 8; numBlocks.y = fullPass ? partCount : partCount / 4; numBlocks.z = fullPass ? 
1 : 3; const ui32 blockPerFeatureMultiplier = EstimateBlockPerFeatureMultiplier(numBlocks, pairCount, 64); numBlocks.x *= blockPerFeatureMultiplier; #define NB_HIST(IS_FULL, BLOCKS_PER_FEATURE) \ ComputeSplitPropertiesHalfBytePairs < blockSize, IS_FULL, BLOCKS_PER_FEATURE > << <numBlocks, blockSize, 0, stream>>>(\ features, featureCount, compressedIndex, pairs,\ weight, partition, histLineSize, histogram); #define DISPATCH(BLOCKS_PER_FEATURE) \ if (fullPass) { \ NB_HIST(true, BLOCKS_PER_FEATURE) \ } else { \ NB_HIST(false, BLOCKS_PER_FEATURE)\ } if (blockPerFeatureMultiplier == 1) { DISPATCH(1); } else if (blockPerFeatureMultiplier == 2) { DISPATCH(2); } else if (blockPerFeatureMultiplier == 4) { DISPATCH(4); } else if (blockPerFeatureMultiplier == 8) { DISPATCH(8); } else if (blockPerFeatureMultiplier == 16) { DISPATCH(16); } else if (blockPerFeatureMultiplier == 32) { DISPATCH(32); } else if (blockPerFeatureMultiplier == 64) { DISPATCH(64); } else { exit(0); } #undef NB_HIST #undef DISPATCH } } }
09537bca4fe2b6ca0ead4ec171936544acbe10fa.cu
#include "pairwise_hist.cuh" #include "split_properties_helpers.cuh" #include "compute_pair_hist_loop.cuh" #include <cooperative_groups.h> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> using namespace cooperative_groups; namespace NKernel { //TODO(noxoomo): tune it template<bool IsFullPass> struct THalfBytePairwiseHistUnrollTrait { static constexpr int InnerUnroll() { #if __CUDA_ARCH__ <= 350 return 2; #elif __CUDA_ARCH__ < 700 return 2; #else return 8;//IsFullPass ? 4 : 8; #endif } static constexpr int OuterUnroll() { #if __CUDA_ARCH__ <= 350 return 4; #elif __CUDA_ARCH__ < 700 return 2; #else return 1; #endif } }; template<int BLOCK_SIZE, class TCmpBins = TCmpBinsWithoutOneHot> struct TPairHistHalfByte { TCmpBins CmpBinsFunc; float* Slice; __forceinline__ __device__ int SliceOffset() { const int warpOffset = 1024 * (threadIdx.x / 32); //we store 4 histograms per block // x8 feature and x4 histograms, though histStart = blockIdx * 16 return warpOffset + (threadIdx.x & 16); } __forceinline__ __device__ TPairHistHalfByte(float* buff, TCmpBins cmpBinsFunc) : CmpBinsFunc(cmpBinsFunc) { Slice = buff; for (int i = threadIdx.x; i < BLOCK_SIZE * 32; i += BLOCK_SIZE) { Slice[i] = 0; } Slice += SliceOffset(); __syncthreads(); } __forceinline__ __device__ void AddPair(const ui32 ci1, const ui32 ci2, const float w) { thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block()); const bool flag = threadIdx.x & 1; const int shift = threadIdx.x & 14; const ui32 bins1 = RotateRight(flag ? ci2 : ci1, 2 * shift); const ui32 bins2 = RotateRight(flag ? ci1 : ci2, 2 * shift); #pragma unroll for (int i = 0; i < 8; i++) { const int f = ((shift + 2 * i) & 14); const int bin1 = (bins1 >> (28 - 4 * i)) & 15; const int bin2 = (bins2 >> (28 - 4 * i)) & 15; const int tmp = (CmpBinsFunc.Compare(i, bin1, bin2, flag) ? 
0 : 512) + f; const int offset1 = 32 * bin1 + tmp + flag; const int offset2 = 32 * bin2 + tmp + !flag; groupTile.sync(); Slice[offset1] += w; groupTile.sync(); Slice[offset2] += w; } } #if __CUDA_ARCH__ < 700 template <int N> __forceinline__ __device__ void AddPairs(const ui32* ci1, const ui32* ci2, const float* w) { #pragma unroll for (int k = 0; k < N; ++k) { AddPair(ci1[k], ci2[k], w[k]); } } #else template <int N> __forceinline__ __device__ void AddPairs(const ui32* ci1, const ui32* ci2, const float* w) { thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block()); const bool flag = threadIdx.x & 1; const int shift = threadIdx.x & 14; ui32 bins1[N]; ui32 bins2[N]; #pragma unroll for (int k = 0; k < N; ++k) { bins1[k] = RotateRight(flag ? ci2[k] : ci1[k], 2 * shift); bins2[k] = RotateRight(flag ? ci1[k] : ci2[k], 2 * shift); } #pragma unroll for (int i = 0; i < 8; i++) { const int f = ((shift + 2 * i) & 14); int bin1[N]; int bin2[N]; #pragma unroll for (int k = 0; k < N; ++k) { bin1[k] = (bins1[k] >> (28 - 4 * i)) & 15; bin2[k] = (bins2[k] >> (28 - 4 * i)) & 15; } int offset1[N]; int offset2[N]; #pragma unroll for (int k = 0; k < N; ++k) { const int tmp = (CmpBinsFunc.Compare(i, bin1[k], bin2[k], flag) ? 
0 : 512) + f; offset1[k] = 32 * bin1[k] + tmp + flag; offset2[k] = 32 * bin2[k] + tmp + !flag; } groupTile.sync(); #pragma unroll for (int k = 0; k < N; ++k) { Slice[offset1[k]] += w[k]; } groupTile.sync(); #pragma unroll for (int k = 0; k < N; ++k) { Slice[offset2[k]] += w[k]; } } } #endif __forceinline__ __device__ void Reduce() { Slice -= SliceOffset(); __syncthreads(); { const int warpHistSize = 1024; for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) { float sum = 0; #pragma unroll 12 for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) { sum += Slice[i]; } Slice[warpHistSize + start] = sum; } } __syncthreads(); const int maxFoldCount = 16; const int fold = (threadIdx.x >> 1) & 15; const int f = threadIdx.x / 32; if (threadIdx.x < 256) { float weightLeq = 0; float weightGe = 0; const bool isSecondBin = (threadIdx.x & 1); if (fold < maxFoldCount) { const volatile float* __restrict__ src = Slice + 1024 //warpHistSize + 32 * fold + 2 * f + isSecondBin; weightLeq = src[0] + src[16]; weightGe = src[512] + src[528]; Slice[4 * (maxFoldCount * f + fold) + isSecondBin] = weightLeq; Slice[4 * (maxFoldCount * f + fold) + 2 + isSecondBin] = weightGe; } } __syncthreads(); } }; template<int BLOCK_SIZE, int N, int OUTER_UNROLL, int BLOCKS_PER_FEATURE> __forceinline__ __device__ void ComputeSplitPropertiesHalfBytePass(const TCFeature* feature, int fCount, const uint* __restrict cindex, const uint2* __restrict pairs, const float* __restrict weight, const TDataPartition* partition, float* __restrict histogram, float* __restrict smem) { #define RUN_COMPUTE_HIST() \ ComputePairHistogram < BLOCK_SIZE, 1, N, OUTER_UNROLL, BLOCKS_PER_FEATURE, THist >(partition->Offset, cindex, partition->Size, pairs, weight, hist); if (HasOneHotFeatures(feature, fCount, reinterpret_cast<int*>(smem))) { using TCmpBins = TCmpBinsWithOneHot<8>; TCmpBins cmpBins(feature, fCount); using THist = TPairHistHalfByte<BLOCK_SIZE, TCmpBins>; THist hist(smem, cmpBins); 
RUN_COMPUTE_HIST(); } else { using THist = TPairHistHalfByte<BLOCK_SIZE>; THist hist(smem, TCmpBinsWithoutOneHot()); RUN_COMPUTE_HIST(); } #undef RUN_COMPUTE_HIST if (threadIdx.x < 256) { const int histId = threadIdx.x & 3; const int binId = (threadIdx.x >> 2) & 7; const int fid = (threadIdx.x >> 5) & 7; if (fid < fCount) { const ui32 bfStart = feature[fid].FirstFoldIndex; histogram += 4 * bfStart; #pragma unroll 2 for (int fold = binId; fold < feature[fid].Folds; fold += 8) { if (fold < feature[fid].Folds) { const int readOffset = 4 * (16 * fid + fold) + histId; if (BLOCKS_PER_FEATURE > 1) { atomicAdd(histogram + 4 * fold + histId, smem[readOffset]); } else { histogram[4 * fold + histId] += smem[readOffset]; } } } } } } #define DECLARE_PASS_HALF_BYTE(N, OUTER_UNROLL, M) \ ComputeSplitPropertiesHalfBytePass<BlockSize, N, OUTER_UNROLL, M>(feature, fCount, cindex, pairs, weight, partition, histogram, &localHist[0]); template<int BlockSize, bool IsFullPass, int M> #if __CUDA_ARCH__ >= 700 __launch_bounds__(BlockSize, 2) #else __launch_bounds__(BlockSize, 1) #endif __global__ void ComputeSplitPropertiesHalfBytePairs(const TCFeature* feature, int fCount, const ui32* cindex, const uint2* pairs, const float* weight, const TDataPartition* partition, int histLineSize, float* histogram) { //histogram line size - size of one part hist. 
const int featureOffset = (blockIdx.x / M) * 8; feature += featureOffset; cindex += feature->Offset; fCount = min(fCount - featureOffset, 8); if (IsFullPass) { partition += blockIdx.y; histogram += blockIdx.y * histLineSize * 4ULL; } else { const int depth = (int)log2((float)gridDim.y); int partId = GetPairwisePartIdToCalculate(partition); partition += partId; histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4ULL; } if (partition->Size == 0) { return; } __shared__ float localHist[32 * BlockSize]; DECLARE_PASS_HALF_BYTE(THalfBytePairwiseHistUnrollTrait<IsFullPass>::InnerUnroll(), THalfBytePairwiseHistUnrollTrait<IsFullPass>::OuterUnroll(), M) } void ComputePairwiseHistogramHalfByte(const TCFeature* features, const ui32 featureCount, const ui32 halfByteFeatureCount, const ui32* compressedIndex, const uint2* pairs, ui32 pairCount, const float* weight, const TDataPartition* partition, ui32 partCount, ui32 histLineSize, bool fullPass, float* histogram, TCudaStream stream) { assert(featureCount == halfByteFeatureCount); if (featureCount > 0) { const int blockSize = 384; dim3 numBlocks; numBlocks.x = (featureCount + 7) / 8; numBlocks.y = fullPass ? partCount : partCount / 4; numBlocks.z = fullPass ? 
1 : 3; const ui32 blockPerFeatureMultiplier = EstimateBlockPerFeatureMultiplier(numBlocks, pairCount, 64); numBlocks.x *= blockPerFeatureMultiplier; #define NB_HIST(IS_FULL, BLOCKS_PER_FEATURE) \ ComputeSplitPropertiesHalfBytePairs < blockSize, IS_FULL, BLOCKS_PER_FEATURE > << <numBlocks, blockSize, 0, stream>>>(\ features, featureCount, compressedIndex, pairs,\ weight, partition, histLineSize, histogram); #define DISPATCH(BLOCKS_PER_FEATURE) \ if (fullPass) { \ NB_HIST(true, BLOCKS_PER_FEATURE) \ } else { \ NB_HIST(false, BLOCKS_PER_FEATURE)\ } if (blockPerFeatureMultiplier == 1) { DISPATCH(1); } else if (blockPerFeatureMultiplier == 2) { DISPATCH(2); } else if (blockPerFeatureMultiplier == 4) { DISPATCH(4); } else if (blockPerFeatureMultiplier == 8) { DISPATCH(8); } else if (blockPerFeatureMultiplier == 16) { DISPATCH(16); } else if (blockPerFeatureMultiplier == 32) { DISPATCH(32); } else if (blockPerFeatureMultiplier == 64) { DISPATCH(64); } else { exit(0); } #undef NB_HIST #undef DISPATCH } } }
55e6cb7fc66b9bbf58d45ddc8449288c56ce33f5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Records, for every position where the value changes between array[i] and
// array[i + 1], the offset i + 1 into partIndices[array[i] + 1] — i.e. the
// index one past the last occurrence of each value.
// A -1 sentinel is used past the final element so the last run is always
// closed (assumes stored values are non-negative — TODO confirm at call site).
// Launch with a 1-D grid covering at least `size` threads.
__global__ void findPartIndicesKernel(int size, int *array, int *partIndices)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) {
        return;  // tail threads beyond the input
    }
    const int cur = array[i];
    const int nxt = (i == size - 1) ? -1 : array[i + 1];
    if (cur != nxt) {
        partIndices[cur + 1] = i + 1;
    }
}
55e6cb7fc66b9bbf58d45ddc8449288c56ce33f5.cu
#include "includes.h"

// Records, for every position where the value changes between array[i] and
// array[i + 1], the offset i + 1 into partIndices[array[i] + 1] — i.e. the
// index one past the last occurrence of each value.
// A -1 sentinel is used past the final element so the last run is always
// closed (assumes stored values are non-negative — TODO confirm at call site).
// Launch with a 1-D grid covering at least `size` threads.
__global__ void findPartIndicesKernel(int size, int *array, int *partIndices)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) {
        return;  // tail threads beyond the input
    }
    const int cur = array[i];
    const int nxt = (i == size - 1) ? -1 : array[i + 1];
    if (cur != nxt) {
        partIndices[cur + 1] = i + 1;
    }
}
29ec9f15ff6735b93f2183dfe527f6378663c9d7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <cmath>
#include <cstdio>      // fprintf — previously only pulled in transitively
#include <sys/time.h>

#define BSZ (16)

// Report any pending HIP error under the given label, synchronizing first so
// errors raised by asynchronous operations (kernel launches, async copies)
// are not missed.
// Takes const char* — callers pass string literals, which may not bind to a
// mutable char* in standard C++.
void checkErrors(const char *label)
{
    hipError_t err;

    err = hipDeviceSynchronize();
    if (err != hipSuccess) {
        const char *e = hipGetErrorString(err);
        fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
    }

    err = hipGetLastError();
    if (err != hipSuccess) {
        const char *e = hipGetErrorString(err);
        fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
    }
}

// Wall-clock time in seconds. Synchronizes the device so that all previously
// launched GPU work is included in the measured interval.
double get_time()
{
    struct timeval tim;
    hipDeviceSynchronize();
    gettimeofday(&tim, NULL);
    return (double) tim.tv_sec + (tim.tv_usec / 1000000.0);
}

// GPU kernel: u_prev <- u, one element per thread (2-D BSZ x BSZ blocks).
__global__ void copy_array(float *u, float *u_prev, int N)
{
    int i = threadIdx.x;
    int j = threadIdx.y;
    int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
    if (I >= N * N) { return; }
    u_prev[I] = u[I];
}

// One explicit Euler step of the 2-D heat equation on an N x N grid.
// Block-interior points read a shared-memory tile; block-edge points fall
// back to global memory. Domain-boundary points are never written, which
// imposes the (Dirichlet) boundary conditions.
__global__ void update(float *u, float *u_prev, int N, float h, float dt, float alpha)
{
    int i = threadIdx.x;
    int j = threadIdx.y;
    int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;

    // Every thread in the block must reach __syncthreads(): out-of-range
    // threads are masked with a flag instead of returning early (an early
    // return before a barrier is undefined behaviour when the grid
    // overshoots N*N).
    bool in_domain = (I < N * N);

    __shared__ float u_prev_sh[BSZ][BSZ];
    if (in_domain) {
        u_prev_sh[i][j] = u_prev[I];
    }
    __syncthreads();

    if (!in_domain) { return; }

    bool bound_check = ((I > N) && (I < N * N - 1 - N) && (I % N != 0) && (I % N != N - 1));
    bool block_check = ((i > 0) && (i < BSZ - 1) && (j > 0) && (j < BSZ - 1));
    // NOTE(review): if N is not a multiple of BSZ, block-interior threads can
    // read tile entries that no in-domain thread wrote — confirm N % BSZ == 0
    // for all intended launch configurations.

    // if not on block boundary do
    if (block_check) {
        u[I] = u_prev_sh[i][j] + alpha * dt / h / h *
               (u_prev_sh[i + 1][j] + u_prev_sh[i - 1][j] +
                u_prev_sh[i][j + 1] + u_prev_sh[i][j - 1] - 4 * u_prev_sh[i][j]);
    }
    // if not on domain boundary
    else if (bound_check) {
        u[I] = u_prev[I] + alpha * dt / (h * h) *
               (u_prev[I + 1] + u_prev[I - 1] + u_prev[I + N] + u_prev[I - N] - 4 * u_prev[I]);
    }
    // Boundary conditions are automatically imposed
    // as we don't touch boundaries
}

int main()
{
    // Problem setup (host side).
    int N = 128;
    int BLOCKSIZE = BSZ;

    hipSetDevice(2);

    float xmin = 0.0f;
    float xmax = 3.5f;
    float ymin = 0.0f;
    //float ymax = 2.0f;
    float h = (xmax - xmin) / (N - 1);   // grid spacing
    float dt = 0.00001f;                 // time step
    float alpha = 0.645f;                // diffusivity
    float time = 0.4f;                   // total simulated time

    int steps = (int) ceil(time / dt);
    int I;

    float *x = new float[N * N];
    float *y = new float[N * N];
    float *u = new float[N * N];
    float *u_prev = new float[N * N];

    // Generate mesh and initial condition: u = 200 on the i==0 / j==0 edges.
    for (int j = 0; j < N; j++) {
        for (int i = 0; i < N; i++) {
            I = N * j + i;
            x[I] = xmin + h * i;
            y[I] = ymin + h * j;
            u[I] = 0.0f;
            if ((i == 0) || (j == 0)) { u[I] = 200.0f; }
        }
    }

    // Allocate in GPU
    float *u_d, *u_prev_d;
    hipMalloc((void**) &u_d, N * N * sizeof(float));
    hipMalloc((void**) &u_prev_d, N * N * sizeof(float));

    // Copy to GPU
    hipMemcpy(u_d, u, N * N * sizeof(float), hipMemcpyHostToDevice);

    // Time-stepping loop.
    dim3 dimGrid(int((N - 0.5) / BLOCKSIZE) + 1, int((N - 0.5) / BLOCKSIZE) + 1);
    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
    double start = get_time();
    for (int t = 0; t < steps; t++) {
        hipLaunchKernelGGL(copy_array, dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N);
        hipLaunchKernelGGL(update, dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N, h, dt, alpha);
    }
    double stop = get_time();
    checkErrors("update");

    double elapsed = stop - start;
    std::cout << "time = " << elapsed << std::endl;

    // Copy result back to host
    hipMemcpy(u, u_d, N * N * sizeof(float), hipMemcpyDeviceToHost);

    std::ofstream temperature("temperature_shared.txt");
    for (int j = 0; j < N; j++) {
        for (int i = 0; i < N; i++) {
            I = N * j + i;
            temperature << x[I] << "\t" << y[I] << "\t" << u[I] << std::endl;
        }
        temperature << "\n";
    }
    temperature.close();

    // Free device and host buffers (host arrays previously leaked).
    hipFree(u_d);
    hipFree(u_prev_d);
    delete[] x;
    delete[] y;
    delete[] u;
    delete[] u_prev;
}
29ec9f15ff6735b93f2183dfe527f6378663c9d7.cu
#include <iostream>
#include <fstream>
#include <cmath>
#include <cstdio>      // fprintf — previously only pulled in transitively
#include <sys/time.h>

#define BSZ (16)

// Report any pending CUDA error under the given label, synchronizing first so
// errors raised by asynchronous operations (kernel launches, async copies)
// are not missed.
// Takes const char* — callers pass string literals, which may not bind to a
// mutable char* in standard C++. cudaDeviceSynchronize replaces the
// deprecated cudaThreadSynchronize.
void checkErrors(const char *label)
{
    cudaError_t err;

    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        const char *e = cudaGetErrorString(err);
        fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
    }

    err = cudaGetLastError();
    if (err != cudaSuccess) {
        const char *e = cudaGetErrorString(err);
        fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
    }
}

// Wall-clock time in seconds. Synchronizes the device so that all previously
// launched GPU work is included in the measured interval.
double get_time()
{
    struct timeval tim;
    cudaDeviceSynchronize();
    gettimeofday(&tim, NULL);
    return (double) tim.tv_sec + (tim.tv_usec / 1000000.0);
}

// GPU kernel: u_prev <- u, one element per thread (2-D BSZ x BSZ blocks).
__global__ void copy_array(float *u, float *u_prev, int N)
{
    int i = threadIdx.x;
    int j = threadIdx.y;
    int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
    if (I >= N * N) { return; }
    u_prev[I] = u[I];
}

// One explicit Euler step of the 2-D heat equation on an N x N grid.
// Block-interior points read a shared-memory tile; block-edge points fall
// back to global memory. Domain-boundary points are never written, which
// imposes the (Dirichlet) boundary conditions.
__global__ void update(float *u, float *u_prev, int N, float h, float dt, float alpha)
{
    int i = threadIdx.x;
    int j = threadIdx.y;
    int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;

    // Every thread in the block must reach __syncthreads(): out-of-range
    // threads are masked with a flag instead of returning early (an early
    // return before a barrier is undefined behaviour when the grid
    // overshoots N*N).
    bool in_domain = (I < N * N);

    __shared__ float u_prev_sh[BSZ][BSZ];
    if (in_domain) {
        u_prev_sh[i][j] = u_prev[I];
    }
    __syncthreads();

    if (!in_domain) { return; }

    bool bound_check = ((I > N) && (I < N * N - 1 - N) && (I % N != 0) && (I % N != N - 1));
    bool block_check = ((i > 0) && (i < BSZ - 1) && (j > 0) && (j < BSZ - 1));
    // NOTE(review): if N is not a multiple of BSZ, block-interior threads can
    // read tile entries that no in-domain thread wrote — confirm N % BSZ == 0
    // for all intended launch configurations.

    // if not on block boundary do
    if (block_check) {
        u[I] = u_prev_sh[i][j] + alpha * dt / h / h *
               (u_prev_sh[i + 1][j] + u_prev_sh[i - 1][j] +
                u_prev_sh[i][j + 1] + u_prev_sh[i][j - 1] - 4 * u_prev_sh[i][j]);
    }
    // if not on domain boundary
    else if (bound_check) {
        u[I] = u_prev[I] + alpha * dt / (h * h) *
               (u_prev[I + 1] + u_prev[I - 1] + u_prev[I + N] + u_prev[I - N] - 4 * u_prev[I]);
    }
    // Boundary conditions are automatically imposed
    // as we don't touch boundaries
}

int main()
{
    // Problem setup (host side).
    int N = 128;
    int BLOCKSIZE = BSZ;

    cudaSetDevice(2);

    float xmin = 0.0f;
    float xmax = 3.5f;
    float ymin = 0.0f;
    //float ymax = 2.0f;
    float h = (xmax - xmin) / (N - 1);   // grid spacing
    float dt = 0.00001f;                 // time step
    float alpha = 0.645f;                // diffusivity
    float time = 0.4f;                   // total simulated time

    int steps = (int) ceil(time / dt);
    int I;

    float *x = new float[N * N];
    float *y = new float[N * N];
    float *u = new float[N * N];
    float *u_prev = new float[N * N];

    // Generate mesh and initial condition: u = 200 on the i==0 / j==0 edges.
    for (int j = 0; j < N; j++) {
        for (int i = 0; i < N; i++) {
            I = N * j + i;
            x[I] = xmin + h * i;
            y[I] = ymin + h * j;
            u[I] = 0.0f;
            if ((i == 0) || (j == 0)) { u[I] = 200.0f; }
        }
    }

    // Allocate in GPU
    float *u_d, *u_prev_d;
    cudaMalloc((void**) &u_d, N * N * sizeof(float));
    cudaMalloc((void**) &u_prev_d, N * N * sizeof(float));

    // Copy to GPU
    cudaMemcpy(u_d, u, N * N * sizeof(float), cudaMemcpyHostToDevice);

    // Time-stepping loop.
    dim3 dimGrid(int((N - 0.5) / BLOCKSIZE) + 1, int((N - 0.5) / BLOCKSIZE) + 1);
    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
    double start = get_time();
    for (int t = 0; t < steps; t++) {
        copy_array <<<dimGrid, dimBlock>>> (u_d, u_prev_d, N);
        update <<<dimGrid, dimBlock>>> (u_d, u_prev_d, N, h, dt, alpha);
    }
    double stop = get_time();
    checkErrors("update");

    double elapsed = stop - start;
    std::cout << "time = " << elapsed << std::endl;

    // Copy result back to host
    cudaMemcpy(u, u_d, N * N * sizeof(float), cudaMemcpyDeviceToHost);

    std::ofstream temperature("temperature_shared.txt");
    for (int j = 0; j < N; j++) {
        for (int i = 0; i < N; i++) {
            I = N * j + i;
            temperature << x[I] << "\t" << y[I] << "\t" << u[I] << std::endl;
        }
        temperature << "\n";
    }
    temperature.close();

    // Free device and host buffers (host arrays previously leaked).
    cudaFree(u_d);
    cudaFree(u_prev_d);
    delete[] x;
    delete[] y;
    delete[] u;
    delete[] u_prev;
}
360dd3b6d6d9126fdf3ab0c36c99b464402af3ef.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************** *@version-0.1 *@author Liyuqian- yuqianfly@gmail.com * *: * * * * * * * 1 * 2 * * 3 * 410240000bytes * 5 * 6 * 7 *********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <cutil_inline.h> #include <cutil.h> #include <string.h> #include <locale.h> #include "loadDocs.h" #include <hip/hip_runtime.h> #include "InitDictionary.h" #define WORD_SIZE 30 #define WWORD_NUM 15 #ifndef __GLOBALVAR__ #define __GLOBALVAR__ texture<unsigned int, 1, hipReadModeElementType> rTBase; texture< int, 1, hipReadModeElementType> rTCheck; texture<unsigned char, 1, hipReadModeElementType> rTStatus; texture<unsigned int, 1, hipReadModeElementType> rTCharsHash; unsigned char * d_status; int * d_check; unsigned int * d_base; unsigned int * d_charsHash; #endif #if __DEVICE_EMULATION__ bool InitCUDA(void){return true;} #else bool InitCUDA(void) { int count = 0; int i = 0; hipGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } for(i = 0; i < count; i++) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return false; } hipSetDevice(i); printf("CUDA initialized.\n"); return true; } #endif #ifndef __INITCUDADATRIE__ #define __INITCUDADATRIE__ /**initial the global Variable method.*/ void loadAndBlendTrie(int *h_check, unsigned int *h_base, unsigned char *h_status, unsigned int *h_charsHash, unsigned int h_baseSize, unsigned int h_charHashSize){ unsigned int baseMemSize=sizeof(unsigned int)*h_baseSize; unsigned int checkMemSize=sizeof(int)*h_baseSize; unsigned int statusMemSize=sizeof(unsigned char)*h_baseSize; //global base CUDA_SAFE_CALL(hipMalloc( (void**)&d_base,baseMemSize)); CUDA_SAFE_CALL(hipMemset(d_base,0,baseMemSize)); 
CUDA_SAFE_CALL(hipMemcpy(d_base,h_base,baseMemSize,hipMemcpyHostToDevice)); //global check CUDA_SAFE_CALL(hipMalloc( (void**)&d_check,checkMemSize)); CUDA_SAFE_CALL(hipMemset(d_check,0,checkMemSize)); CUDA_SAFE_CALL( hipMemcpy(d_check,h_check,checkMemSize,hipMemcpyHostToDevice)); //global status CUDA_SAFE_CALL(hipMalloc( (void**)&d_status,statusMemSize)); CUDA_SAFE_CALL(hipMemset(d_status,0,statusMemSize)); CUDA_SAFE_CALL( hipMemcpy(d_status,h_status,statusMemSize,hipMemcpyHostToDevice)); //global charsHash CUDA_SAFE_CALL(hipMalloc( (void**)&d_charsHash,sizeof(unsigned int)*(h_charHashSize))); CUDA_SAFE_CALL(hipMemset(d_charsHash,'\0',sizeof(unsigned int)*(h_charHashSize))); CUDA_SAFE_CALL(hipMemcpy(d_charsHash,h_charsHash,sizeof(unsigned int)*(h_charHashSize),hipMemcpyHostToDevice)); //blend array cutilSafeCall(hipBindTexture(0, rTBase, d_base)); cutilSafeCall(hipBindTexture(0, rTCheck, d_check )); cutilSafeCall(hipBindTexture(0, rTStatus, d_status)); cutilSafeCall(hipBindTexture(0, rTCharsHash, d_charsHash )); } //texture blend bool initCUDAdatrieOpt(){ int *h_check; unsigned int *h_base; unsigned char *h_status; unsigned int *h_charsHash; InitDictionary initDic; h_base = initDic.base; h_check = initDic.check; h_status = initDic.status; // h_charsHash = initDic.charsHash; loadAndBlendTrie(h_check,h_base,h_status,h_charsHash,318608,65535); free(h_check); free(h_base); free(h_status); free(h_charsHash); return true; } void deleteTrie(){ hipUnbindTexture(rTBase); hipUnbindTexture(rTCheck); hipUnbindTexture(rTStatus); hipUnbindTexture(rTCharsHash); cutilSafeCall(hipFree(d_base)); cutilSafeCall(hipFree(d_check)); cutilSafeCall(hipFree(d_status)); cutilSafeCall(hipFree(d_charsHash)); } #endif /************************************************************************/ /* */ /************************************************************************/ /***/ __device__ void tGetAllWords(unsigned short *w_chars,int posFrom,int posTo,unsigned short *output){ int outputIndex=0; 
int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0; unsigned char stats='0'; int baseValue = 0; int checkValue = 0; for (; i <posTo; i++) { end++; charHashCode = tex1Dfetch(rTCharsHash,w_chars[i]); if( charHashCode<1 ) stats='0'; else{ checkValue=baseValue; baseValue = tex1Dfetch(rTBase,checkValue) + charHashCode; if (tex1Dfetch(rTCheck,baseValue) == checkValue || tex1Dfetch(rTCheck,baseValue) == -1) stats= tex1Dfetch(rTStatus,baseValue); else stats='0'; } switch (stats) { case '0': i = start; start++; end = 0; baseValue = 0; break; case '2': for(t=0;t<end;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; break; case '3': for(t=0;t<end;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; i = start; start++; end = 0; baseValue = 0; break; }//end of switch }//end of for } /***/ __device__ void tMaxFrontWords(unsigned short * w_chars,int posFrom,int posTo,unsigned short * output) { int outputIndex=0; int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0; unsigned char stats='0'; int tempEnd = 0; int baseValue = 0; int checkValue = 0; bool hasEnd = false; int wlen=posTo-posFrom; for(;i<posTo;i++){ end++; charHashCode = tex1Dfetch(rTCharsHash,w_chars[i]); if( charHashCode<1 ) stats='0'; else{ checkValue=baseValue; baseValue = tex1Dfetch(rTBase,checkValue) + charHashCode; if (tex1Dfetch(rTCheck,baseValue) == checkValue || tex1Dfetch(rTCheck,baseValue) == -1) stats= tex1Dfetch(rTStatus,baseValue); else stats='0'; } switch (stats) { case '0': if (hasEnd) { for(t=0;t<tempEnd;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; hasEnd = false; baseValue = 0; start = start + tempEnd ; i = start-1; tempEnd = 0; end = 0; break; } else { baseValue = 0; tempEnd = 0; i = start; start++; end = 0; } break; case '2': tempEnd = end; hasEnd = true; break; case '3': for(t=0;t<end;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49;//1 hasEnd = false; baseValue = 0; tempEnd = 0; start = i ; end = 0; 
break; } if (i == wlen - 1) { if (hasEnd) { for(t=0;t<tempEnd;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; hasEnd = false; baseValue = 0; start = start + tempEnd; i = start-1; tempEnd = 0; end = 0; break; } } } } /***/ __device__ void tMinFrontWords(unsigned short * w_chars,int posFrom,int posTo,unsigned short * output){ int outputIndex=0; int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0; unsigned char stats='0'; int baseValue = 0; int checkValue = 0; for (; i < posTo; i++) { end++; charHashCode = tex1Dfetch(rTCharsHash,w_chars[i]); if( charHashCode<1 ) stats='0'; else{ checkValue=baseValue; baseValue = tex1Dfetch(rTBase,checkValue) + charHashCode; if (tex1Dfetch(rTCheck,baseValue) == checkValue || tex1Dfetch(rTCheck,baseValue) == -1) stats= tex1Dfetch(rTStatus,baseValue); else stats='0'; } switch (stats) { case '0': baseValue = 0; i = start; start++; end = 0; break; case '2': for(t=0;t<end;t++) { output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; baseValue = 0; start = i+1; end = 0; break; case '3': for(t=0;t<end;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; baseValue = 0; start = i+1; end = 0; break; } } } /** * * block */ __global__ void tBatchSearchKernel(HostDocs * inputDocs,HostDocsTotalTokens *outputTokens){ int bid=blockIdx.x; //id int tid=blockIdx.x*blockDim.x+threadIdx.x;//id int docsize=inputDocs->DocStreamSize[bid];// int average=docsize/blockDim.x;// int start=threadIdx.x*average;// int end=start+average;// //tGetAllWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]); //tMaxFrontWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]); tMinFrontWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]); } /** blockblockTREAD_PER_BLOCK thread block_num* TREAD_PER_BLOCK MAX_TOKEN_PERTHREAD==100 100 */ void runCUDADATrieOpt(char * inputFold,char * outputFold){ // make double trie if(initCUDAdatrieOpt()) 
printf("InitCUDADAtrie success.\n\n"); else printf("*** initCUDADATrie failed!\n\n"); //inputFoldDOC_BATCH_SIZE==96 HostDocs *hdocs = loadBatchDocs(inputFold); printHostDocs("docs",hdocs); printf("\nCopy docs to GPU...\n"); HostDocs *ddocs; unsigned short **CPU_ARRAY; CPU_ARRAY =(unsigned short **)malloc(sizeof(unsigned short*)*DOC_BATCH_SIZE); memset(CPU_ARRAY,0,sizeof(unsigned short*)*DOC_BATCH_SIZE); int docSize=0,docsNum=hdocs->DocCount; for(int i=0;i<docsNum;i++){ docSize=hdocs->DocStreamSize[i]; cutilSafeCall( hipMalloc((void **)&CPU_ARRAY[i],sizeof(unsigned short)*docSize)); cutilSafeCall( hipMemset(CPU_ARRAY[i],0,sizeof(unsigned short)*(docSize))); cutilSafeCall( hipMemcpy(CPU_ARRAY[i],hdocs->DocStream[i],sizeof(unsigned short)*docSize,hipMemcpyHostToDevice)); } cutilSafeCall(hipMalloc( (void**)&ddocs,sizeof(HostDocs))); cutilSafeCall(hipMemcpy(ddocs->DocStream,CPU_ARRAY,sizeof(unsigned short*)*DOC_BATCH_SIZE,hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(ddocs->DocStreamSize,hdocs->DocStreamSize,sizeof(unsigned short)*DOC_BATCH_SIZE,hipMemcpyHostToDevice)); printf("End of copy\n\n"); //printHostDocs("d_docs test",bdocs); //cpu HostDocsTotalTokens *hDocAllTokens; int tokensTotalMemSize=TOTAL_THREADS_NUM*MAX_TOKEN_PERTHREAD;//128*96*100 hDocAllTokens=(HostDocsTotalTokens*)malloc(sizeof(HostDocsTotalTokens)); hDocAllTokens->threadsNum=0; memset(hDocAllTokens->ThreadsTokens,0,sizeof(unsigned short)*tokensTotalMemSize); // HostDocsTotalTokens *dDocAllTokens; CUDA_SAFE_CALL(hipMalloc( (void**)&dDocAllTokens,sizeof(HostDocsTotalTokens))); int tNum=docsNum*TREAD_PER_BLOCK;//2*128 cutilSafeCall(hipMemcpy( &dDocAllTokens->threadsNum,&tNum,sizeof(unsigned short),hipMemcpyHostToDevice)); cutilSafeCall(hipMemset( dDocAllTokens->ThreadsTokens,0,sizeof(unsigned short)*tokensTotalMemSize)); int blockNum=docsNum;// int threadsPerBlock=TREAD_PER_BLOCK;// dim3 dimBlock(threadsPerBlock,1,1); dim3 dimGrid(blockNum,1); printf("start kernel...\n"); unsigned int timer = 0; 
cutilCheckError( cutCreateTimer( &timer)); cutilCheckError( cutStartTimer( timer)); hipLaunchKernelGGL(( tBatchSearchKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, ddocs,dDocAllTokens); //testLoad<<<1,1>>>(ddocs,writeDoc); cutilCheckMsg("Kernel execution failed\n"); hipDeviceSynchronize(); cutilCheckError( cutStopTimer( timer)); printf("Kernel processing time: %f (ms)\n", cutGetTimerValue( timer)); cutilCheckError( cutDeleteTimer( timer)); printf("end of kernel\n"); cutilSafeCall(hipMemcpy(hDocAllTokens,dDocAllTokens,sizeof(HostDocsTotalTokens),hipMemcpyDeviceToHost)); writeDocsTotalTokens("keneal docs total tokens: minWords",outputFold,hDocAllTokens); // free(hdocs); free(hDocAllTokens); cutilSafeCall(hipFree(ddocs)); cutilSafeCall(hipFree(dDocAllTokens)); deleteTrie(); } int main(int argc, char* argv[]) { if(!InitCUDA()) { return 0; } char *console="outputFiles/OminWords_log_48p_64t.txt"; //freopen(console,"w",stdout); //out.txt //time_t timep; //time (&timep); //printf("------------------------\n"); //printf("%s\n",ctime(&timep)); char * inputFold="inputFiles/48/"; char * outputFold="outputFiles/OminWords_48p_64t.txt"; runCUDADATrieOpt(inputFold,outputFold); //time (&timep); //printf("%s\n",ctime(&timep)); //printf("------------------------\n"); //fclose(stdout);// CUT_EXIT(argc, argv); return 0; }
360dd3b6d6d9126fdf3ab0c36c99b464402af3ef.cu
/******************************************************************** *@version-0.1 *@author Liyuqian-李雨前 yuqianfly@gmail.com *华中科技大学计算机学院 智能与分布式计算实验室 *注意: 除华中科技大学计算机学院智能与分布式计算外, *任何个人、团队、研究结构、商业单位等不能对本算法进行专利申请或者撰写 *本算法的论文。 *任何个人、团队、研究结构、商业单位都可以对本算法进行使用、修改、扩展、传播。 *使用本算法不当造成的损失,责任有使用者自行负责。 * * 使用提示: * 1。本词典详细构造没有公开,有疑问请与作者联系 * 2。随代码附带词典信息与代码具有很强的相关系性,任何不正确修改都可能导致 * 分词异常。 * 3。使用本代码过程中,带来的损失,作者一概不负责 * 4。在启动运行前,需要足够的堆栈空间,例如10240000bytes * 5。优化版本和 非优化版本,在编译时,选择一个就行,不支持同时编译 * 6。三种分词可以选择对应不同的输出目录。 * 7。其他未尽描述的问题,请与作者联系。 *********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <cutil_inline.h> #include <cutil.h> #include <string.h> #include <locale.h> #include "loadDocs.h" #include <cuda_runtime.h> #include "InitDictionary.h" #define WORD_SIZE 30 #define WWORD_NUM 15 #ifndef __GLOBALVAR__ #define __GLOBALVAR__ texture<unsigned int, 1, cudaReadModeElementType> rTBase; texture< int, 1, cudaReadModeElementType> rTCheck; texture<unsigned char, 1, cudaReadModeElementType> rTStatus; texture<unsigned int, 1, cudaReadModeElementType> rTCharsHash; unsigned char * d_status; int * d_check; unsigned int * d_base; unsigned int * d_charsHash; #endif #if __DEVICE_EMULATION__ bool InitCUDA(void){return true;} #else bool InitCUDA(void) { int count = 0; int i = 0; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return false; } cudaSetDevice(i); printf("CUDA initialized.\n"); return true; } #endif #ifndef __INITCUDADATRIE__ #define __INITCUDADATRIE__ /**initial the global Variable method.*/ void loadAndBlendTrie(int *h_check, unsigned int *h_base, unsigned char *h_status, unsigned int *h_charsHash, unsigned int h_baseSize, unsigned int h_charHashSize){ unsigned int 
baseMemSize=sizeof(unsigned int)*h_baseSize; unsigned int checkMemSize=sizeof(int)*h_baseSize; unsigned int statusMemSize=sizeof(unsigned char)*h_baseSize; //global base CUDA_SAFE_CALL(cudaMalloc( (void**)&d_base,baseMemSize)); CUDA_SAFE_CALL(cudaMemset(d_base,0,baseMemSize)); CUDA_SAFE_CALL(cudaMemcpy(d_base,h_base,baseMemSize,cudaMemcpyHostToDevice)); //global check CUDA_SAFE_CALL(cudaMalloc( (void**)&d_check,checkMemSize)); CUDA_SAFE_CALL(cudaMemset(d_check,0,checkMemSize)); CUDA_SAFE_CALL( cudaMemcpy(d_check,h_check,checkMemSize,cudaMemcpyHostToDevice)); //global status CUDA_SAFE_CALL(cudaMalloc( (void**)&d_status,statusMemSize)); CUDA_SAFE_CALL(cudaMemset(d_status,0,statusMemSize)); CUDA_SAFE_CALL( cudaMemcpy(d_status,h_status,statusMemSize,cudaMemcpyHostToDevice)); //global charsHash CUDA_SAFE_CALL(cudaMalloc( (void**)&d_charsHash,sizeof(unsigned int)*(h_charHashSize))); CUDA_SAFE_CALL(cudaMemset(d_charsHash,'\0',sizeof(unsigned int)*(h_charHashSize))); CUDA_SAFE_CALL(cudaMemcpy(d_charsHash,h_charsHash,sizeof(unsigned int)*(h_charHashSize),cudaMemcpyHostToDevice)); //blend array cutilSafeCall(cudaBindTexture(0, rTBase, d_base)); cutilSafeCall(cudaBindTexture(0, rTCheck, d_check )); cutilSafeCall(cudaBindTexture(0, rTStatus, d_status)); cutilSafeCall(cudaBindTexture(0, rTCharsHash, d_charsHash )); } //texture blend bool initCUDAdatrieOpt(){ int *h_check; unsigned int *h_base; unsigned char *h_status; unsigned int *h_charsHash; InitDictionary initDic; h_base = initDic.base; h_check = initDic.check; h_status = initDic.status; //按位处理 h_charsHash = initDic.charsHash; loadAndBlendTrie(h_check,h_base,h_status,h_charsHash,318608,65535); free(h_check); free(h_base); free(h_status); free(h_charsHash); return true; } void deleteTrie(){ cudaUnbindTexture(rTBase); cudaUnbindTexture(rTCheck); cudaUnbindTexture(rTStatus); cudaUnbindTexture(rTCharsHash); cutilSafeCall(cudaFree(d_base)); cutilSafeCall(cudaFree(d_check)); cutilSafeCall(cudaFree(d_status)); 
cutilSafeCall(cudaFree(d_charsHash)); } #endif /************************************************************************/ /* 三种分词实现 */ /************************************************************************/ /**正向全匹配分词*/ __device__ void tGetAllWords(unsigned short *w_chars,int posFrom,int posTo,unsigned short *output){ int outputIndex=0; int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0; unsigned char stats='0'; int baseValue = 0; int checkValue = 0; for (; i <posTo; i++) { end++; charHashCode = tex1Dfetch(rTCharsHash,w_chars[i]); if( charHashCode<1 ) stats='0'; else{ checkValue=baseValue; baseValue = tex1Dfetch(rTBase,checkValue) + charHashCode; if (tex1Dfetch(rTCheck,baseValue) == checkValue || tex1Dfetch(rTCheck,baseValue) == -1) stats= tex1Dfetch(rTStatus,baseValue); else stats='0'; } switch (stats) { case '0': i = start; start++; end = 0; baseValue = 0; break; case '2': for(t=0;t<end;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; break; case '3': for(t=0;t<end;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; i = start; start++; end = 0; baseValue = 0; break; }//end of switch }//end of for } /**正向最大匹配分词*/ __device__ void tMaxFrontWords(unsigned short * w_chars,int posFrom,int posTo,unsigned short * output) { int outputIndex=0; int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0; unsigned char stats='0'; int tempEnd = 0; int baseValue = 0; int checkValue = 0; bool hasEnd = false; int wlen=posTo-posFrom; for(;i<posTo;i++){ end++; charHashCode = tex1Dfetch(rTCharsHash,w_chars[i]); if( charHashCode<1 ) stats='0'; else{ checkValue=baseValue; baseValue = tex1Dfetch(rTBase,checkValue) + charHashCode; if (tex1Dfetch(rTCheck,baseValue) == checkValue || tex1Dfetch(rTCheck,baseValue) == -1) stats= tex1Dfetch(rTStatus,baseValue); else stats='0'; } switch (stats) { case '0': if (hasEnd) { for(t=0;t<tempEnd;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; hasEnd = false; baseValue = 0; start = 
start + tempEnd ; i = start-1; tempEnd = 0; end = 0; break; } else { baseValue = 0; tempEnd = 0; i = start; start++; end = 0; } break; case '2': tempEnd = end; hasEnd = true; break; case '3': for(t=0;t<end;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49;//代表字符1 hasEnd = false; baseValue = 0; tempEnd = 0; start = i ; end = 0; break; } if (i == wlen - 1) { if (hasEnd) { for(t=0;t<tempEnd;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; hasEnd = false; baseValue = 0; start = start + tempEnd; i = start-1; tempEnd = 0; end = 0; break; } } } } /**正向最小匹配分词*/ __device__ void tMinFrontWords(unsigned short * w_chars,int posFrom,int posTo,unsigned short * output){ int outputIndex=0; int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0; unsigned char stats='0'; int baseValue = 0; int checkValue = 0; for (; i < posTo; i++) { end++; charHashCode = tex1Dfetch(rTCharsHash,w_chars[i]); if( charHashCode<1 ) stats='0'; else{ checkValue=baseValue; baseValue = tex1Dfetch(rTBase,checkValue) + charHashCode; if (tex1Dfetch(rTCheck,baseValue) == checkValue || tex1Dfetch(rTCheck,baseValue) == -1) stats= tex1Dfetch(rTStatus,baseValue); else stats='0'; } switch (stats) { case '0': baseValue = 0; i = start; start++; end = 0; break; case '2': for(t=0;t<end;t++) { output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; baseValue = 0; start = i+1; end = 0; break; case '3': for(t=0;t<end;t++){ output[outputIndex++]=w_chars[t+start]; } output[outputIndex++]=49; baseValue = 0; start = i+1; end = 0; break; } } } /**内核入口函数 * 功能:进行批量文档分词 * 文档按线程数平均划分,每个文档对应一个block */ __global__ void tBatchSearchKernel(HostDocs * inputDocs,HostDocsTotalTokens *outputTokens){ int bid=blockIdx.x; //块全局id int tid=blockIdx.x*blockDim.x+threadIdx.x;//线程全局id int docsize=inputDocs->DocStreamSize[bid];//快对应文档大小 int average=docsize/blockDim.x;//每个线程数据 int start=threadIdx.x*average;//包括端点 int end=start+average;//不包括端点 
//tGetAllWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]); //tMaxFrontWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]); tMinFrontWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]); } /** 这个版本功能: 根据加载的文档个数,启动相应的block数,每个block分配TREAD_PER_BLOCK线程 分词结果按照thread单位保存,即 block_num* TREAD_PER_BLOCK 个数组元素, 每个元素长度MAX_TOKEN_PER_THREAD==100 即每个线程分词结果最大100个中文字 */ void runCUDADATrieOpt(char * inputFold,char * outputFold){ // make double trie if(initCUDAdatrieOpt()) printf("InitCUDADAtrie success.\n\n"); else printf("*** initCUDADATrie failed!\n\n"); //从文件夹inputFold加载批量文档,测试用例,不要超过批量上限DOC_BATCH_SIZE==96 HostDocs *hdocs = loadBatchDocs(inputFold); printHostDocs("docs",hdocs); printf("\nCopy docs to GPU...\n"); HostDocs *ddocs; unsigned short **CPU_ARRAY; CPU_ARRAY =(unsigned short **)malloc(sizeof(unsigned short*)*DOC_BATCH_SIZE); memset(CPU_ARRAY,0,sizeof(unsigned short*)*DOC_BATCH_SIZE); int docSize=0,docsNum=hdocs->DocCount; for(int i=0;i<docsNum;i++){ docSize=hdocs->DocStreamSize[i]; cutilSafeCall( cudaMalloc((void **)&CPU_ARRAY[i],sizeof(unsigned short)*docSize)); cutilSafeCall( cudaMemset(CPU_ARRAY[i],0,sizeof(unsigned short)*(docSize))); cutilSafeCall( cudaMemcpy(CPU_ARRAY[i],hdocs->DocStream[i],sizeof(unsigned short)*docSize,cudaMemcpyHostToDevice)); } cutilSafeCall(cudaMalloc( (void**)&ddocs,sizeof(HostDocs))); cutilSafeCall(cudaMemcpy(ddocs->DocStream,CPU_ARRAY,sizeof(unsigned short*)*DOC_BATCH_SIZE,cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(ddocs->DocStreamSize,hdocs->DocStreamSize,sizeof(unsigned short)*DOC_BATCH_SIZE,cudaMemcpyHostToDevice)); printf("End of copy\n\n"); //printHostDocs("d_docs test",bdocs); //cpu端接受内核输出结果 HostDocsTotalTokens *hDocAllTokens; int tokensTotalMemSize=TOTAL_THREADS_NUM*MAX_TOKEN_PER_THREAD;//128*96*100 hDocAllTokens=(HostDocsTotalTokens*)malloc(sizeof(HostDocsTotalTokens)); hDocAllTokens->threadsNum=0; memset(hDocAllTokens->ThreadsTokens,0,sizeof(unsigned 
short)*tokensTotalMemSize); //内核输出结果 HostDocsTotalTokens *dDocAllTokens; CUDA_SAFE_CALL(cudaMalloc( (void**)&dDocAllTokens,sizeof(HostDocsTotalTokens))); int tNum=docsNum*TREAD_PER_BLOCK;//全部线程数目2*128 cutilSafeCall(cudaMemcpy( &dDocAllTokens->threadsNum,&tNum,sizeof(unsigned short),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemset( dDocAllTokens->ThreadsTokens,0,sizeof(unsigned short)*tokensTotalMemSize)); int blockNum=docsNum;//工作线程块数目 int threadsPerBlock=TREAD_PER_BLOCK;//每个线程块里面的线程个数 dim3 dimBlock(threadsPerBlock,1,1); dim3 dimGrid(blockNum,1); printf("start kernel...\n"); unsigned int timer = 0; cutilCheckError( cutCreateTimer( &timer)); cutilCheckError( cutStartTimer( timer)); tBatchSearchKernel<<<dimGrid,dimBlock>>>(ddocs,dDocAllTokens); //testLoad<<<1,1>>>(ddocs,writeDoc); cutilCheckMsg("Kernel execution failed\n"); cudaThreadSynchronize(); cutilCheckError( cutStopTimer( timer)); printf("Kernel processing time: %f (ms)\n", cutGetTimerValue( timer)); cutilCheckError( cutDeleteTimer( timer)); printf("end of kernel\n"); cutilSafeCall(cudaMemcpy(hDocAllTokens,dDocAllTokens,sizeof(HostDocsTotalTokens),cudaMemcpyDeviceToHost)); writeDocsTotalTokens("keneal docs total tokens: minWords",outputFold,hDocAllTokens); //释放资源 free(hdocs); free(hDocAllTokens); cutilSafeCall(cudaFree(ddocs)); cutilSafeCall(cudaFree(dDocAllTokens)); deleteTrie(); } int main(int argc, char* argv[]) { if(!InitCUDA()) { return 0; } char *console="outputFiles/OminWords_log_48p_64t.txt"; //freopen(console,"w",stdout); //输出重定向,输出数据将保存在out.txt文件中 //time_t timep; //time (&timep); //printf("------------------------\n"); //printf("%s\n",ctime(&timep)); char * inputFold="inputFiles/48/"; char * outputFold="outputFiles/OminWords_48p_64t.txt"; runCUDADATrieOpt(inputFold,outputFold); //time (&timep); //printf("%s\n",ctime(&timep)); //printf("------------------------\n"); //fclose(stdout);//关闭文件 CUT_EXIT(argc, argv); return 0; }
74c40fb2222111f56378a2c422685487ca6c39cf.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <algorithm> #include <vector> #include <cusp/complex.h> #include <cusp/blas.h> #include <hipcub/hipcub.hpp> #include <thrust/reduce.h> #include "mex.h" #include "gpu/mxGPUArray.h" #include "hip/hip_runtime.h" #include "polargrid.h" //#define N=256; //texture<float, 1, hipReadModeElementType> texRef; void error_handle(hipError_t status = hipErrorLaunchFailure); void error_handle(hipError_t status){ if(status != hipSuccess){ hipError_t s= hipGetLastError(); if(s != hipSuccess){ // printf("%s\n",hipGetErrorString(s)); exit(1); } } } __host__ __device__ float weight(int2 grid_pos, float2 point_pos){ return weight(make_float2(grid_pos.x,grid_pos.y),point_pos); } __host__ __device__ float weight(float2 grid_pos, float2 point_pos){ if(fabs(grid_pos.x-point_pos.x) >= 3.0f || fabs(grid_pos.y-point_pos.y) >= 3.0f){ return 0; } return fabs(grid_pos.x-point_pos.x)+ fabs(grid_pos.y-point_pos.y); } __device__ float kb_weight(float2 grid_pos, float2 point_pos, int kb_table_size, float kb_table_scale,hipTextureObject_t texRef){ float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale; float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale; int ix = (int)dist_x; float fx = dist_x-ix; int iy = (int)dist_y; float fy = dist_y-iy; if(ix+1 < kb_table_size && iy+1 < kb_table_size){ return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) * (tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy)); } return 0.0f; } __device__ float kb_weight(float grid_x, float grid_y, float point_pos_x, float point_pos_y, int kb_table_size, float kb_table_scale,hipTextureObject_t texRef){ float dist_x = fabsf(grid_x-point_pos_x)*kb_table_scale; float dist_y = fabsf(grid_y-point_pos_y)*kb_table_scale; int ix = (int)dist_x; float fx = dist_x-ix; int iy = (int)dist_y; float fy = dist_y-iy; if(ix+1 < kb_table_size && iy+1 < kb_table_size){ return 
(tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) * (tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy)); } return 0.0f; } __device__ float kb_weight(float2 grid_pos, float2 point_pos, int kb_table_size, float kb_table_scale,int tid,hipTextureObject_t texRef){ float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale; float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale; float ix = rintf(dist_x); float fx = dist_x-ix; float iy = rintf(dist_y); float fy = dist_y-iy; if(ix+1 < kb_table_size && iy+1 < kb_table_size){ return (tex1Dfetch<float>(texRef,tid)*(1.0f-fx) + tex1Dfetch<float>(texRef,tid)*(fx)) * (tex1Dfetch<float>(texRef,tid)*(1.0f-fy) + tex1Dfetch<float>(texRef,tid)*(fy)); } return 0.0f; } __global__ void sum_points( const float * binned_points_x, const float * binned_points_y, const cusp::complex<float> * point_value, uint2 grid_size, const int * binned_points, const int idx, const int ppb, int x, int y, const int kb_table_size, const float kb_table_scale, hipTextureObject_t texRef,cusp::complex<float> * grid_value) { __shared__ cusp::complex<float> sum_t[SHARED_SIZE]; int tid = threadIdx.x; sum_t[tid] = 0; grid_value[y*grid_size.x+x]=1e3; for(int j = tid;j<ppb;j+=SHARED_SIZE){ sum_t[tid] += point_value[binned_points[idx+j]]; // kb_weight(make_float2(x,y), // make_float2(binned_points_x[idx+j],binned_points_y[idx+j]), // kb_table_size,kb_table_scale); } return; for(unsigned int j=1; j < blockDim.x; j *= 2) { // modulo arithmetic is slow! 
if ((tid & (2*j-1)) == 0) { sum_t[tid] += sum_t[tid + j]; } __syncthreads(); } __syncthreads(); if(tid == 0){ //grid_value[y*grid_size.x+x]=sum_t[0]; grid_value[y*grid_size.x+x]=sum_t[0]; // printf("sum = %g\n", sum_t[0]); return; } } __global__ void grid_points_cuda_mex_interleaved_kernel(const float * point_x, const float * point_y, const cusp::complex<float> * point_value, int npoints, uint2 grid_size, const int * points_per_bin, const int * bin_dimension_x, const int * bin_dimension_y, const int * binned_points, const int * binned_points_idx, const int * bin_location, const float * binned_points_x, const float * binned_points_y, const int nbins, const int kb_table_size, const float kb_table_scale, hipTextureObject_t texRef, cusp::complex<float> * grid_value){ __shared__ cusp::complex<float> value; // Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float> //typedef hipcub::BlockReduce<cusp::complex<float>, 128> BlockReduce; // Allocate shared memory for BlockReduce //__shared__ typename BlockReduce::TempStorage temp_storage; //int aggregate = BlockReduce(temp_storage).Sum(thread_data); int i = blockIdx.x; int tid = threadIdx.x; uint2 corner; corner.x = bin_location[i]%grid_size.x; corner.y = bin_location[i]/grid_size.x; const int idx = binned_points_idx[i]; const int ppb = points_per_bin[i]; // cusp::complex<float> * value; // small bin or large no of samples if(bin_dimension_x[i]*bin_dimension_y[i] < 64 || points_per_bin[i] > SHARED_SIZE){ __shared__ cusp::complex<float> sum_t[BLOCKSIZE]; // loop through grid for(int y = corner.y;y<corner.y+bin_dimension_x[i];y+=1){ for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){ if( 0){ if(tid == 0){ hipLaunchKernelGGL(( sum_points), dim3(1),dim3(blockDim), 0, 0, binned_points_x,binned_points_y,point_value,grid_size, binned_points,ppb,idx,x,y, kb_table_size,kb_table_scale,texRef, grid_value); __syncthreads(); hipDeviceSynchronize(); __syncthreads(); // printf("sum = %g\n", 
grid_value[y*grid_size.x+x]); } }else{ sum_t[tid] = 0; for(int j = tid;j<ppb;j+=blockDim.x){ sum_t[tid] += point_value[binned_points[idx+j]]* kb_weight(make_float2(x,y), make_float2(binned_points_x[idx+j],binned_points_y[idx+j]), kb_table_size,kb_table_scale,texRef); } for(unsigned int j=1; j < blockDim.x; j *= 2) { // modulo arithmetic is slow! if ((tid & (2*j-1)) == 0) { sum_t[tid] += sum_t[tid + j]; } __syncthreads(); } if(tid == 0){grid_value[y*grid_size.x+x]+=sum_t[0]; } } } } // large dimensions }else if(bin_dimension_x[i]*bin_dimension_y[i] >BLOCKSIZE/2-1) { __shared__ float point_pos_cache_x[SHARED_SIZE]; __shared__ float point_pos_cache_y[SHARED_SIZE]; __shared__ cusp::complex<float> point_value_cache[SHARED_SIZE]; __shared__ cusp::complex<float> sum_t[BLOCKSIZE]; // Lets try to load all points to shared memory / for(int j = tid;j<ppb;j+= blockDim.x){ const int point = binned_points[idx+j]; point_value_cache[j] = point_value[point]; point_pos_cache_x[j] = binned_points_x[idx+j]; point_pos_cache_y[j] = binned_points_y[idx+j]; } __syncthreads(); const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]}; // loop through dimensions for(int k = tid;k<dims.x*dims.y;k+=blockDim.x){ const int x = (k%(dims.x))+corner.x; const int y = (k/dims.x)+corner.y; cusp::complex<float> my_sum = 0; for(int j = 0;j<ppb;j++){ //loop through all the points float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale,texRef); my_sum += point_value_cache[j]*w; } grid_value[y*grid_size.x+x] = my_sum; } }else{ //small dimension and few points __shared__ float point_pos_cache_x[SHARED_SIZE]; __shared__ float point_pos_cache_y[SHARED_SIZE]; __shared__ cusp::complex<float> point_value_cache[SHARED_SIZE]; __shared__ cusp::complex<float> sum_t[BLOCKSIZE]; // Lets try to load things to shared memory / for(int j = tid;j<ppb;j+= blockDim.x){ const int point = binned_points[idx+j]; point_value_cache[j] = point_value[point]; point_pos_cache_x[j] = 
binned_points_x[idx+j]; point_pos_cache_y[j] = binned_points_y[idx+j]; } __syncthreads(); const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]}; int b = 4; for(int k = tid/b;k<dims.x*dims.y;k+=blockDim.x/b){ const int x = (k%(dims.x))+corner.x; const int y = (k/dims.x)+corner.y; sum_t[tid] = 0; //sum_i[tid] = 0; for(int j = (tid&(b-1));j<ppb;j+=b){ float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale,texRef); sum_t[tid] += point_value_cache[j]*w; } // Do a reduce in shared memory for(unsigned int j=1; j < b; j = (j << 1)) { // modulo arithmetic is slow! if ((tid & ((j<<1)-1)) == 0) { sum_t[tid] += sum_t[tid + j]; } __syncthreads(); } if((tid&(b-1)) == 0){ grid_value[y*grid_size.x+x] = sum_t[tid]; } } } } void grid_points_cuda_interleaved_mex(const float * point_pos_x, const float * point_pos_y, const cusp::complex<float> * point_value, int npoints, uint2 grid_size, const int * points_per_bin, const int * bin_dimension_x, const int * bin_dimension_y, const int * binned_points, const int * binned_points_idx, const int * bin_location, const float * binned_points_x, const float * binned_points_y, int nbins, const float * kb_table, const int kb_table_size, const float kb_table_scale, hipTextureObject_t texRef, cusp::complex<float> * grid_value){ hipMemset(grid_value,0,sizeof(float2)*grid_size.x*grid_size.y); /* size_t offset; hipBindTexture(&offset,texRef, kb_table, sizeof(float)*kb_table_size); if(offset != 0){ // printf("Error: Texture offset different than zero. 
Table not allocated with hipMalloc!%d\n"); return; } */ int grid = nbins; int block_size = BLOCKSIZE; clock_t t_i = clock(); hipLaunchKernelGGL(( grid_points_cuda_mex_interleaved_kernel), dim3(grid),dim3(block_size), 0, 0, point_pos_x, point_pos_y, point_value, npoints, grid_size, points_per_bin, bin_dimension_x, bin_dimension_y, binned_points, binned_points_idx, bin_location, binned_points_x, binned_points_y,nbins, kb_table_size, kb_table_scale,texRef, grid_value); hipDeviceSynchronize(); clock_t t_e = clock(); error_handle(); // printf("%d iter in %5.1f ms\n",iter,(t_e-t_i)*1000.0/CLOCKS_PER_SEC); } #define SX prhs[0] #define SY prhs[1] #define SV prhs[2] #define GRID_DIM prhs[3] #define SPB prhs[4] #define BIN_DIM_X prhs[5] #define BIN_DIM_Y prhs[6] #define SIB prhs[7] #define BSO prhs[8] #define BL prhs[9] #define BPX prhs[10] #define BPY prhs[11] #define KLUT prhs[12] #define KLUTS prhs[13] void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){ /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); mxGPUArray const *samples_x; mxGPUArray const *samples_y; mxGPUArray const *samples_values; mxGPUArray const *samples_per_bin; mxGPUArray const *bin_dimensions_x; mxGPUArray const *bin_dimensions_y; mxGPUArray const *samples_in_bin; mxGPUArray const *bin_start_offset; mxGPUArray const *bin_location; mxGPUArray const *bin_points_x; mxGPUArray const *bin_points_y; mxGPUArray const *kernel_lookup_table; //int *grid_dim =(int *) mxGetPr(GRID_DIM); float kernel_lookup_table_scale = mxGetScalar(KLUTS); int *grid_dim0=( int *) (mxGetData(GRID_DIM)); mwSize *grid_dim=(mwSize *)grid_dim0; //mexPrintf("Grid Dimensions %d x %d\n",(grid_dim[0]),(grid_dim[1])); //mexPrintf("Grid Dimensions %d x %d\n",(grid_dim1[0]),(grid_dim1[1])); // OUTPUT mxGPUArray *grid_values; samples_x = mxGPUCreateFromMxArray(SX); samples_y = mxGPUCreateFromMxArray(SY); samples_values = mxGPUCreateFromMxArray(SV); samples_per_bin = mxGPUCreateFromMxArray(SPB); bin_dimensions_x = mxGPUCreateFromMxArray(BIN_DIM_X); bin_dimensions_y = mxGPUCreateFromMxArray(BIN_DIM_Y); samples_in_bin = mxGPUCreateFromMxArray(SIB); bin_start_offset = mxGPUCreateFromMxArray(BSO); bin_location = mxGPUCreateFromMxArray(BL); bin_points_x = mxGPUCreateFromMxArray(BPX); bin_points_y = mxGPUCreateFromMxArray(BPY); kernel_lookup_table= mxGPUCreateFromMxArray(KLUT); int nbins = (int) (mxGPUGetNumberOfElements(bin_dimensions_x)); int npoints = (int)(mxGPUGetNumberOfElements(samples_x)); int kernel_lookup_table_size = ( int)(mxGPUGetNumberOfElements(kernel_lookup_table)); mwSize ndim= 2; // output: // float2 * grid_values; // float2 * gold_grid_values; // plhs[0] = jkt_new( grid_dim[0], grid_dim[1], mxSINGLE_CLASS, mxREAL,); //grid_values= mxGPUCreateGPUArray(ndim,grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); grid_values= mxGPUCreateGPUArray(ndim, grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); // now get the pointer or whatever it is const float *d_samples_x = (const float 
*)(mxGPUGetDataReadOnly(samples_x)); const float *d_samples_y = (const float *)(mxGPUGetDataReadOnly(samples_y)); // float2 *d_samples_values = (float2 *)(const float2 *)(mxGPUGetDataReadOnly(samples_values)); const cusp::complex<float> *d_samples_values = (const cusp::complex<float> *)(mxGPUGetDataReadOnly(samples_values)); const int * d_samples_per_bin = (const int *)(mxGPUGetDataReadOnly(samples_per_bin)); const int * d_bin_dimensions_x = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_x)); const int * d_bin_dimensions_y = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_y)); const int * d_samples_in_bin = (const int *)(mxGPUGetDataReadOnly(samples_in_bin)); const int * d_bin_start_offset =(const int *)(mxGPUGetDataReadOnly(bin_start_offset)); const int * d_bin_location = (const int *)(mxGPUGetDataReadOnly(bin_location)); const float * d_bin_points_x = (const float *)(mxGPUGetDataReadOnly(bin_points_x)); const float * d_bin_points_y = (const float *)(mxGPUGetDataReadOnly(bin_points_y)); float * d_kernel_lookup_table = ( float *)(mxGPUGetDataReadOnly(kernel_lookup_table)); const uint2 grid_size = {grid_dim[0],grid_dim[1]}; //float2 * d_grid_values = (float2 *)(mxGPUGetData(grid_values)); cusp::complex<float> * d_grid_values = (cusp::complex<float> *)(mxGPUGetData(grid_values)); //-------------------------------------------- // // create texture object hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeLinear; resDesc.res.linear.devPtr = d_kernel_lookup_table; resDesc.res.linear.desc.f = hipChannelFormatKindFloat; resDesc.res.linear.desc.x = 32; // bits per channel resDesc.res.linear.sizeInBytes = kernel_lookup_table_size*sizeof(float); hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = hipReadModeElementType; // create texture object: we only have to do this once! 
hipTextureObject_t texRef=0; hipCreateTextureObject(&texRef, &resDesc, &texDesc, NULL); //-------------------------------------------- grid_points_cuda_interleaved_mex( d_samples_x, d_samples_y, d_samples_values, npoints, grid_size, d_samples_per_bin, d_bin_dimensions_x, d_bin_dimensions_y, d_samples_in_bin, d_bin_start_offset, d_bin_location, d_bin_points_x, d_bin_points_y, nbins, d_kernel_lookup_table, kernel_lookup_table_size, kernel_lookup_table_scale,texRef, d_grid_values); //mexErrMsgTxt("gpuArray 2"); plhs[0] = mxGPUCreateMxArrayOnGPU(grid_values); /* */ mxGPUDestroyGPUArray( samples_x); mxGPUDestroyGPUArray( samples_y); mxGPUDestroyGPUArray( samples_values); mxGPUDestroyGPUArray( samples_per_bin); mxGPUDestroyGPUArray( bin_dimensions_x); mxGPUDestroyGPUArray( bin_dimensions_y); mxGPUDestroyGPUArray( samples_in_bin); mxGPUDestroyGPUArray( kernel_lookup_table); mxGPUDestroyGPUArray( bin_start_offset); mxGPUDestroyGPUArray( bin_location); mxGPUDestroyGPUArray( bin_points_x); mxGPUDestroyGPUArray( bin_points_y); mxGPUDestroyGPUArray( grid_values); }
74c40fb2222111f56378a2c422685487ca6c39cf.cu
#include <stdlib.h> #include <stdio.h> #include <algorithm> #include <vector> #include <cusp/complex.h> #include <cusp/blas.h> #include <cub/cub.cuh> #include <thrust/reduce.h> #include "mex.h" #include "gpu/mxGPUArray.h" #include "cuda.h" #include "polargrid.h" //#define N=256; //texture<float, 1, cudaReadModeElementType> texRef; void error_handle(cudaError_t status = cudaErrorLaunchFailure); void error_handle(cudaError_t status){ if(status != cudaSuccess){ cudaError_t s= cudaGetLastError(); if(s != cudaSuccess){ // printf("%s\n",cudaGetErrorString(s)); exit(1); } } } __host__ __device__ float weight(int2 grid_pos, float2 point_pos){ return weight(make_float2(grid_pos.x,grid_pos.y),point_pos); } __host__ __device__ float weight(float2 grid_pos, float2 point_pos){ if(fabs(grid_pos.x-point_pos.x) >= 3.0f || fabs(grid_pos.y-point_pos.y) >= 3.0f){ return 0; } return fabs(grid_pos.x-point_pos.x)+ fabs(grid_pos.y-point_pos.y); } __device__ float kb_weight(float2 grid_pos, float2 point_pos, int kb_table_size, float kb_table_scale,cudaTextureObject_t texRef){ float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale; float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale; int ix = (int)dist_x; float fx = dist_x-ix; int iy = (int)dist_y; float fy = dist_y-iy; if(ix+1 < kb_table_size && iy+1 < kb_table_size){ return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) * (tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy)); } return 0.0f; } __device__ float kb_weight(float grid_x, float grid_y, float point_pos_x, float point_pos_y, int kb_table_size, float kb_table_scale,cudaTextureObject_t texRef){ float dist_x = fabsf(grid_x-point_pos_x)*kb_table_scale; float dist_y = fabsf(grid_y-point_pos_y)*kb_table_scale; int ix = (int)dist_x; float fx = dist_x-ix; int iy = (int)dist_y; float fy = dist_y-iy; if(ix+1 < kb_table_size && iy+1 < kb_table_size){ return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + 
tex1Dfetch<float>(texRef,ix+1)*(fx)) * (tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy)); } return 0.0f; } __device__ float kb_weight(float2 grid_pos, float2 point_pos, int kb_table_size, float kb_table_scale,int tid,cudaTextureObject_t texRef){ float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale; float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale; float ix = rintf(dist_x); float fx = dist_x-ix; float iy = rintf(dist_y); float fy = dist_y-iy; if(ix+1 < kb_table_size && iy+1 < kb_table_size){ return (tex1Dfetch<float>(texRef,tid)*(1.0f-fx) + tex1Dfetch<float>(texRef,tid)*(fx)) * (tex1Dfetch<float>(texRef,tid)*(1.0f-fy) + tex1Dfetch<float>(texRef,tid)*(fy)); } return 0.0f; } __global__ void sum_points( const float * binned_points_x, const float * binned_points_y, const cusp::complex<float> * point_value, uint2 grid_size, const int * binned_points, const int idx, const int ppb, int x, int y, const int kb_table_size, const float kb_table_scale, cudaTextureObject_t texRef,cusp::complex<float> * grid_value) { __shared__ cusp::complex<float> sum_t[SHARED_SIZE]; int tid = threadIdx.x; sum_t[tid] = 0; grid_value[y*grid_size.x+x]=1e3; for(int j = tid;j<ppb;j+=SHARED_SIZE){ sum_t[tid] += point_value[binned_points[idx+j]]; // kb_weight(make_float2(x,y), // make_float2(binned_points_x[idx+j],binned_points_y[idx+j]), // kb_table_size,kb_table_scale); } return; for(unsigned int j=1; j < blockDim.x; j *= 2) { // modulo arithmetic is slow! 
if ((tid & (2*j-1)) == 0) { sum_t[tid] += sum_t[tid + j]; } __syncthreads(); } __syncthreads(); if(tid == 0){ //grid_value[y*grid_size.x+x]=sum_t[0]; grid_value[y*grid_size.x+x]=sum_t[0]; // printf("sum = %g\n", sum_t[0]); return; } } __global__ void grid_points_cuda_mex_interleaved_kernel(const float * point_x, const float * point_y, const cusp::complex<float> * point_value, int npoints, uint2 grid_size, const int * points_per_bin, const int * bin_dimension_x, const int * bin_dimension_y, const int * binned_points, const int * binned_points_idx, const int * bin_location, const float * binned_points_x, const float * binned_points_y, const int nbins, const int kb_table_size, const float kb_table_scale, cudaTextureObject_t texRef, cusp::complex<float> * grid_value){ __shared__ cusp::complex<float> value; // Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float> //typedef cub::BlockReduce<cusp::complex<float>, 128> BlockReduce; // Allocate shared memory for BlockReduce //__shared__ typename BlockReduce::TempStorage temp_storage; //int aggregate = BlockReduce(temp_storage).Sum(thread_data); int i = blockIdx.x; int tid = threadIdx.x; uint2 corner; corner.x = bin_location[i]%grid_size.x; corner.y = bin_location[i]/grid_size.x; const int idx = binned_points_idx[i]; const int ppb = points_per_bin[i]; // cusp::complex<float> * value; // small bin or large no of samples if(bin_dimension_x[i]*bin_dimension_y[i] < 64 || points_per_bin[i] > SHARED_SIZE){ __shared__ cusp::complex<float> sum_t[BLOCKSIZE]; // loop through grid for(int y = corner.y;y<corner.y+bin_dimension_x[i];y+=1){ for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){ if( 0){ if(tid == 0){ sum_points<<<1,blockDim>>>(binned_points_x,binned_points_y,point_value,grid_size, binned_points,ppb,idx,x,y, kb_table_size,kb_table_scale,texRef, grid_value); __syncthreads(); cudaDeviceSynchronize(); __syncthreads(); // printf("sum = %g\n", grid_value[y*grid_size.x+x]); } }else{ sum_t[tid] = 0; 
for(int j = tid;j<ppb;j+=blockDim.x){ sum_t[tid] += point_value[binned_points[idx+j]]* kb_weight(make_float2(x,y), make_float2(binned_points_x[idx+j],binned_points_y[idx+j]), kb_table_size,kb_table_scale,texRef); } for(unsigned int j=1; j < blockDim.x; j *= 2) { // modulo arithmetic is slow! if ((tid & (2*j-1)) == 0) { sum_t[tid] += sum_t[tid + j]; } __syncthreads(); } if(tid == 0){grid_value[y*grid_size.x+x]+=sum_t[0]; } } } } // large dimensions }else if(bin_dimension_x[i]*bin_dimension_y[i] >BLOCKSIZE/2-1) { __shared__ float point_pos_cache_x[SHARED_SIZE]; __shared__ float point_pos_cache_y[SHARED_SIZE]; __shared__ cusp::complex<float> point_value_cache[SHARED_SIZE]; __shared__ cusp::complex<float> sum_t[BLOCKSIZE]; // Lets try to load all points to shared memory / for(int j = tid;j<ppb;j+= blockDim.x){ const int point = binned_points[idx+j]; point_value_cache[j] = point_value[point]; point_pos_cache_x[j] = binned_points_x[idx+j]; point_pos_cache_y[j] = binned_points_y[idx+j]; } __syncthreads(); const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]}; // loop through dimensions for(int k = tid;k<dims.x*dims.y;k+=blockDim.x){ const int x = (k%(dims.x))+corner.x; const int y = (k/dims.x)+corner.y; cusp::complex<float> my_sum = 0; for(int j = 0;j<ppb;j++){ //loop through all the points float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale,texRef); my_sum += point_value_cache[j]*w; } grid_value[y*grid_size.x+x] = my_sum; } }else{ //small dimension and few points __shared__ float point_pos_cache_x[SHARED_SIZE]; __shared__ float point_pos_cache_y[SHARED_SIZE]; __shared__ cusp::complex<float> point_value_cache[SHARED_SIZE]; __shared__ cusp::complex<float> sum_t[BLOCKSIZE]; // Lets try to load things to shared memory / for(int j = tid;j<ppb;j+= blockDim.x){ const int point = binned_points[idx+j]; point_value_cache[j] = point_value[point]; point_pos_cache_x[j] = binned_points_x[idx+j]; point_pos_cache_y[j] = 
binned_points_y[idx+j]; } __syncthreads(); const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]}; int b = 4; for(int k = tid/b;k<dims.x*dims.y;k+=blockDim.x/b){ const int x = (k%(dims.x))+corner.x; const int y = (k/dims.x)+corner.y; sum_t[tid] = 0; //sum_i[tid] = 0; for(int j = (tid&(b-1));j<ppb;j+=b){ float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale,texRef); sum_t[tid] += point_value_cache[j]*w; } // Do a reduce in shared memory for(unsigned int j=1; j < b; j = (j << 1)) { // modulo arithmetic is slow! if ((tid & ((j<<1)-1)) == 0) { sum_t[tid] += sum_t[tid + j]; } __syncthreads(); } if((tid&(b-1)) == 0){ grid_value[y*grid_size.x+x] = sum_t[tid]; } } } } void grid_points_cuda_interleaved_mex(const float * point_pos_x, const float * point_pos_y, const cusp::complex<float> * point_value, int npoints, uint2 grid_size, const int * points_per_bin, const int * bin_dimension_x, const int * bin_dimension_y, const int * binned_points, const int * binned_points_idx, const int * bin_location, const float * binned_points_x, const float * binned_points_y, int nbins, const float * kb_table, const int kb_table_size, const float kb_table_scale, cudaTextureObject_t texRef, cusp::complex<float> * grid_value){ cudaMemset(grid_value,0,sizeof(float2)*grid_size.x*grid_size.y); /* size_t offset; cudaBindTexture(&offset,texRef, kb_table, sizeof(float)*kb_table_size); if(offset != 0){ // printf("Error: Texture offset different than zero. 
Table not allocated with cudaMalloc!%d\n"); return; } */ int grid = nbins; int block_size = BLOCKSIZE; clock_t t_i = clock(); grid_points_cuda_mex_interleaved_kernel<<<grid,block_size>>>( point_pos_x, point_pos_y, point_value, npoints, grid_size, points_per_bin, bin_dimension_x, bin_dimension_y, binned_points, binned_points_idx, bin_location, binned_points_x, binned_points_y,nbins, kb_table_size, kb_table_scale,texRef, grid_value); cudaThreadSynchronize(); clock_t t_e = clock(); error_handle(); // printf("%d iter in %5.1f ms\n",iter,(t_e-t_i)*1000.0/CLOCKS_PER_SEC); } #define SX prhs[0] #define SY prhs[1] #define SV prhs[2] #define GRID_DIM prhs[3] #define SPB prhs[4] #define BIN_DIM_X prhs[5] #define BIN_DIM_Y prhs[6] #define SIB prhs[7] #define BSO prhs[8] #define BL prhs[9] #define BPX prhs[10] #define BPY prhs[11] #define KLUT prhs[12] #define KLUTS prhs[13] void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){ /* Initialize the MathWorks GPU API. */ mxInitGPU(); mxGPUArray const *samples_x; mxGPUArray const *samples_y; mxGPUArray const *samples_values; mxGPUArray const *samples_per_bin; mxGPUArray const *bin_dimensions_x; mxGPUArray const *bin_dimensions_y; mxGPUArray const *samples_in_bin; mxGPUArray const *bin_start_offset; mxGPUArray const *bin_location; mxGPUArray const *bin_points_x; mxGPUArray const *bin_points_y; mxGPUArray const *kernel_lookup_table; //int *grid_dim =(int *) mxGetPr(GRID_DIM); float kernel_lookup_table_scale = mxGetScalar(KLUTS); int *grid_dim0=( int *) (mxGetData(GRID_DIM)); mwSize *grid_dim=(mwSize *)grid_dim0; //mexPrintf("Grid Dimensions %d x %d\n",(grid_dim[0]),(grid_dim[1])); //mexPrintf("Grid Dimensions %d x %d\n",(grid_dim1[0]),(grid_dim1[1])); // OUTPUT mxGPUArray *grid_values; samples_x = mxGPUCreateFromMxArray(SX); samples_y = mxGPUCreateFromMxArray(SY); samples_values = mxGPUCreateFromMxArray(SV); samples_per_bin = mxGPUCreateFromMxArray(SPB); bin_dimensions_x = mxGPUCreateFromMxArray(BIN_DIM_X); 
bin_dimensions_y = mxGPUCreateFromMxArray(BIN_DIM_Y); samples_in_bin = mxGPUCreateFromMxArray(SIB); bin_start_offset = mxGPUCreateFromMxArray(BSO); bin_location = mxGPUCreateFromMxArray(BL); bin_points_x = mxGPUCreateFromMxArray(BPX); bin_points_y = mxGPUCreateFromMxArray(BPY); kernel_lookup_table= mxGPUCreateFromMxArray(KLUT); int nbins = (int) (mxGPUGetNumberOfElements(bin_dimensions_x)); int npoints = (int)(mxGPUGetNumberOfElements(samples_x)); int kernel_lookup_table_size = ( int)(mxGPUGetNumberOfElements(kernel_lookup_table)); mwSize ndim= 2; // output: // float2 * grid_values; // float2 * gold_grid_values; // plhs[0] = jkt_new( grid_dim[0], grid_dim[1], mxSINGLE_CLASS, mxREAL,); //grid_values= mxGPUCreateGPUArray(ndim,grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); grid_values= mxGPUCreateGPUArray(ndim, grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); // now get the pointer or whatever it is const float *d_samples_x = (const float *)(mxGPUGetDataReadOnly(samples_x)); const float *d_samples_y = (const float *)(mxGPUGetDataReadOnly(samples_y)); // float2 *d_samples_values = (float2 *)(const float2 *)(mxGPUGetDataReadOnly(samples_values)); const cusp::complex<float> *d_samples_values = (const cusp::complex<float> *)(mxGPUGetDataReadOnly(samples_values)); const int * d_samples_per_bin = (const int *)(mxGPUGetDataReadOnly(samples_per_bin)); const int * d_bin_dimensions_x = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_x)); const int * d_bin_dimensions_y = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_y)); const int * d_samples_in_bin = (const int *)(mxGPUGetDataReadOnly(samples_in_bin)); const int * d_bin_start_offset =(const int *)(mxGPUGetDataReadOnly(bin_start_offset)); const int * d_bin_location = (const int *)(mxGPUGetDataReadOnly(bin_location)); const float * d_bin_points_x = (const float *)(mxGPUGetDataReadOnly(bin_points_x)); const float * d_bin_points_y = (const float *)(mxGPUGetDataReadOnly(bin_points_y)); float * 
d_kernel_lookup_table = ( float *)(mxGPUGetDataReadOnly(kernel_lookup_table)); const uint2 grid_size = {grid_dim[0],grid_dim[1]}; //float2 * d_grid_values = (float2 *)(mxGPUGetData(grid_values)); cusp::complex<float> * d_grid_values = (cusp::complex<float> *)(mxGPUGetData(grid_values)); //-------------------------------------------- // // create texture object cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeLinear; resDesc.res.linear.devPtr = d_kernel_lookup_table; resDesc.res.linear.desc.f = cudaChannelFormatKindFloat; resDesc.res.linear.desc.x = 32; // bits per channel resDesc.res.linear.sizeInBytes = kernel_lookup_table_size*sizeof(float); cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; // create texture object: we only have to do this once! cudaTextureObject_t texRef=0; cudaCreateTextureObject(&texRef, &resDesc, &texDesc, NULL); //-------------------------------------------- grid_points_cuda_interleaved_mex( d_samples_x, d_samples_y, d_samples_values, npoints, grid_size, d_samples_per_bin, d_bin_dimensions_x, d_bin_dimensions_y, d_samples_in_bin, d_bin_start_offset, d_bin_location, d_bin_points_x, d_bin_points_y, nbins, d_kernel_lookup_table, kernel_lookup_table_size, kernel_lookup_table_scale,texRef, d_grid_values); //mexErrMsgTxt("gpuArray 2"); plhs[0] = mxGPUCreateMxArrayOnGPU(grid_values); /* */ mxGPUDestroyGPUArray( samples_x); mxGPUDestroyGPUArray( samples_y); mxGPUDestroyGPUArray( samples_values); mxGPUDestroyGPUArray( samples_per_bin); mxGPUDestroyGPUArray( bin_dimensions_x); mxGPUDestroyGPUArray( bin_dimensions_y); mxGPUDestroyGPUArray( samples_in_bin); mxGPUDestroyGPUArray( kernel_lookup_table); mxGPUDestroyGPUArray( bin_start_offset); mxGPUDestroyGPUArray( bin_location); mxGPUDestroyGPUArray( bin_points_x); mxGPUDestroyGPUArray( bin_points_y); mxGPUDestroyGPUArray( grid_values); }
373174108855a7552039bb31eebdd11e00944e94.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/hip/SortStable.h> #include <ATen/Dispatch.h> #include <ATen/core/Array.h> #include <ATen/core/TensorBase.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/cub.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/native/hip/SortUtils.cuh> #include <ATen/native/hip/SortingCommon.cuh> #include <c10/core/DeviceArray.h> #include <limits> namespace at { namespace native { namespace { struct offset_t { int stride; int begin; __device__ int operator[](int i) { return stride * (begin + i); } }; // Segmented sort by full sort algorithm:. // Say we are sorting a (2, 3) tensor. We have in flattened form: // values 0.4 1.2 5.3 6.2 1.3 2.3 // indices 0 1 2 0 1 2 // segment_id 0 0 0 1 1 1 // First we sort by values, globally: // values 6.2 5.3 2.3 1.2 1.3 0.4 // indices 0 2 2 1 1 0 // segment_id 1 0 1 0 1 0 // Then we stable sort by segment id: // values 5.3 1.2 0.4 6.2 2.3 1.3 // indices 2 1 0 0 2 1 // segment_id 0 0 0 1 1 1 // This method can only work if the slice we are sorting (`dim`) is // innermost, and both values and indices are contiguous. We do this // by re-arranging the input into this form as needed, which will // unfortunately allocate memory if the request is not in this form. // Vectorized sort is slower than iterated sort if the number of // slices is small (since we're sorting twice, instead of invoking a // smaller sort `numSlices` times), but the cub sort // implementation here is a catch-all, so we're not looking for // efficiency, but instead correctness. 
template <typename scalar_t> __global__ void sort_postprocess_kernel( const scalar_t* in, scalar_t* out, int64_t* index, const int2* i_s_ptr, int nsegments, int nsort) { CUDA_KERNEL_LOOP(i, nsegments * nsort) { int segment = i / nsort; int j = i % nsort; int offset = segment * nsort; const scalar_t* in_ = in + offset; scalar_t* out_ = out + offset; int64_t* index_ = index + offset; const int2* i_s_ptr_ = i_s_ptr + offset; int idx = i_s_ptr_[j].y; index_[j] = idx; out_[j] = in_[idx]; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_index_and_segment_kernel( int2* data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { auto div_mod = nsort_divider.divmod(idx); auto segment = static_cast<int>(div_mod.div); auto sort = static_cast<int>(div_mod.mod); data[idx] = int2{segment, sort}; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_reverse_indices_kernel( int64_t* data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { data[idx] = nsort_divider.mod(idx); } } template <typename scalar_t> inline void segmented_sort_large_segments( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t* self_ptr, scalar_t* values_ptr, int64_t* indices_ptr) { using namespace at::cuda::detail; auto allocator = at::cuda::getCUDADeviceAllocator(); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(nsort); c10::DeviceArray<int64_t> indices(*allocator, nsort); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); hipLaunchKernelGGL(( fill_reverse_indices_kernel), dim3(grid), dim3(block), 0, stream, indices.get(), nsort, nsort_divider); const int64_t* initial_indices = indices.get(); for (auto i : c10::irange(nsegments)) { at::cuda::cub::radix_sort_pairs<scalar_t, int64_t>( self_ptr, values_ptr, initial_indices, 
indices_ptr, nsort, descending); indices_ptr += nsort; self_ptr += nsort; values_ptr += nsort; } } template <typename scalar_t> inline void segmented_sort_pairs_by_full_sort( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t* const self_ptr, scalar_t* const values_ptr, int64_t* const indices_ptr) { int64_t segment_bits = std::max<int64_t>( 1L, static_cast<int64_t>(::ceil(std::log2(nsegments)))); const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto indices_and_segment = cuda_allocator->allocate(numel * sizeof(int2)); auto i_s_ptr = static_cast<int2*>(indices_and_segment.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); hipLaunchKernelGGL(( fill_index_and_segment_kernel), dim3(grid), dim3(block), 0, stream, i_s_ptr, numel, nsort_divider); auto indices_and_segment2 = cuda_allocator->allocate(nsegments * nsort * sizeof(int2)); auto i_s_ptr2 = static_cast<int2*>(indices_and_segment2.get()); at::cuda::cub::radix_sort_pairs<scalar_t, int2>( self_ptr, nullptr, i_s_ptr, i_s_ptr2, n, descending); TORCH_INTERNAL_ASSERT(segment_bits <= 32); // sort on lower 32bits, i.e. 
segment index at::cuda::cub::radix_sort_keys<int64_t>( reinterpret_cast<int64_t*>(i_s_ptr2), reinterpret_cast<int64_t*>(i_s_ptr), n, false, 0, segment_bits); hipLaunchKernelGGL(( sort_postprocess_kernel), dim3((n + 511) / 512), dim3(512), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_ptr, values_ptr, indices_ptr, i_s_ptr, nsegments, nsort); } template <typename scalar_t> void segmented_sort_pairs( int64_t nsegments, int64_t nsort, int64_t n, bool descending, const scalar_t* self_ptr, scalar_t* values_ptr, int64_t* indices_ptr) { const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto reverse_indices = cuda_allocator->allocate(numel * sizeof(int64_t)); int64_t* reverse_indices_ptr = static_cast<int64_t*>(reverse_indices.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); hipLaunchKernelGGL(( fill_reverse_indices_kernel), dim3(grid), dim3(block), 0, stream, reverse_indices_ptr, numel, nsort_divider); at::cuda::cub::segmented_sort_pairs( self_ptr, values_ptr, reverse_indices_ptr, indices_ptr, n, nsegments, offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending); } } // namespace void launch_stable_sort_kernel( const TensorBase& self, int64_t dim, bool descending, const TensorBase& values, const TensorBase& indices) { const auto numel = self.numel(); if (numel == 0) { return; } int64_t numel_or_intmax = ::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max())); int64_t nsort = self.size(dim); int64_t nbatch = (numel_or_intmax / nsort) * nsort; TORCH_CHECK(nbatch > 0, "Cannot sort dimension of length ", nsort); int64_t* indices_ptr = indices.data_ptr<int64_t>(); #if (defined(USE_ROCM) && ROCM_VERSION < 40500) constexpr bool is_rocm_bf16_sort_unsupported = true; #else constexpr bool is_rocm_bf16_sort_unsupported = false; 
#endif AT_DISPATCH_ALL_TYPES_AND3( kBool, kHalf, kBFloat16, self.scalar_type(), "sort", [&] { c10::guts::if_constexpr<!( is_rocm_bf16_sort_unsupported && std::is_same<scalar_t, c10::BFloat16>::value)>( [&](auto _) { const scalar_t* self_ptr = self.data_ptr<scalar_t>(); scalar_t* values_ptr = values.data_ptr<scalar_t>(); int64_t remaining = _(numel); while (remaining > 0) { int64_t n = ::min(remaining, nbatch); int64_t nsegments = n / nsort; if (nsegments == 1 || nsort >= 1000000) { // rough heuristics where even a single // sort occupies GPU segmented_sort_large_segments( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else if (nsegments < 128) { segmented_sort_pairs_by_full_sort( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else { segmented_sort_pairs( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } remaining -= n; self_ptr += n; values_ptr += n; indices_ptr += n; } }, [&](auto _) { TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm < 4.5"); }); }); } } // namespace native } // namespace at
373174108855a7552039bb31eebdd11e00944e94.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/cuda/SortStable.h> #include <ATen/Dispatch.h> #include <ATen/core/Array.h> #include <ATen/core/TensorBase.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/cub.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/SortUtils.cuh> #include <ATen/native/cuda/SortingCommon.cuh> #include <c10/core/DeviceArray.h> #include <limits> namespace at { namespace native { namespace { struct offset_t { int stride; int begin; __device__ int operator[](int i) { return stride * (begin + i); } }; // Segmented sort by full sort algorithm:. // Say we are sorting a (2, 3) tensor. We have in flattened form: // values 0.4 1.2 5.3 6.2 1.3 2.3 // indices 0 1 2 0 1 2 // segment_id 0 0 0 1 1 1 // First we sort by values, globally: // values 6.2 5.3 2.3 1.2 1.3 0.4 // indices 0 2 2 1 1 0 // segment_id 1 0 1 0 1 0 // Then we stable sort by segment id: // values 5.3 1.2 0.4 6.2 2.3 1.3 // indices 2 1 0 0 2 1 // segment_id 0 0 0 1 1 1 // This method can only work if the slice we are sorting (`dim`) is // innermost, and both values and indices are contiguous. We do this // by re-arranging the input into this form as needed, which will // unfortunately allocate memory if the request is not in this form. // Vectorized sort is slower than iterated sort if the number of // slices is small (since we're sorting twice, instead of invoking a // smaller sort `numSlices` times), but the cub sort // implementation here is a catch-all, so we're not looking for // efficiency, but instead correctness. 
template <typename scalar_t> __global__ void sort_postprocess_kernel( const scalar_t* in, scalar_t* out, int64_t* index, const int2* i_s_ptr, int nsegments, int nsort) { CUDA_KERNEL_LOOP(i, nsegments * nsort) { int segment = i / nsort; int j = i % nsort; int offset = segment * nsort; const scalar_t* in_ = in + offset; scalar_t* out_ = out + offset; int64_t* index_ = index + offset; const int2* i_s_ptr_ = i_s_ptr + offset; int idx = i_s_ptr_[j].y; index_[j] = idx; out_[j] = in_[idx]; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_index_and_segment_kernel( int2* data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { auto div_mod = nsort_divider.divmod(idx); auto segment = static_cast<int>(div_mod.div); auto sort = static_cast<int>(div_mod.mod); data[idx] = int2{segment, sort}; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_reverse_indices_kernel( int64_t* data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { data[idx] = nsort_divider.mod(idx); } } template <typename scalar_t> inline void segmented_sort_large_segments( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t* self_ptr, scalar_t* values_ptr, int64_t* indices_ptr) { using namespace at::cuda::detail; auto allocator = at::cuda::getCUDADeviceAllocator(); auto stream = at::cuda::getCurrentCUDAStream(); dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(nsort); c10::DeviceArray<int64_t> indices(*allocator, nsort); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); fill_reverse_indices_kernel<<<grid, block, 0, stream>>>( indices.get(), nsort, nsort_divider); const int64_t* initial_indices = indices.get(); for (auto i : c10::irange(nsegments)) { at::cuda::cub::radix_sort_pairs<scalar_t, int64_t>( self_ptr, values_ptr, initial_indices, indices_ptr, nsort, descending); indices_ptr += nsort; 
self_ptr += nsort; values_ptr += nsort; } } template <typename scalar_t> inline void segmented_sort_pairs_by_full_sort( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t* const self_ptr, scalar_t* const values_ptr, int64_t* const indices_ptr) { int64_t segment_bits = std::max<int64_t>( 1L, static_cast<int64_t>(std::ceil(std::log2(nsegments)))); const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto indices_and_segment = cuda_allocator->allocate(numel * sizeof(int2)); auto i_s_ptr = static_cast<int2*>(indices_and_segment.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::cuda::getCurrentCUDAStream(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); fill_index_and_segment_kernel<<<grid, block, 0, stream>>>( i_s_ptr, numel, nsort_divider); auto indices_and_segment2 = cuda_allocator->allocate(nsegments * nsort * sizeof(int2)); auto i_s_ptr2 = static_cast<int2*>(indices_and_segment2.get()); at::cuda::cub::radix_sort_pairs<scalar_t, int2>( self_ptr, nullptr, i_s_ptr, i_s_ptr2, n, descending); TORCH_INTERNAL_ASSERT(segment_bits <= 32); // sort on lower 32bits, i.e. 
segment index at::cuda::cub::radix_sort_keys<int64_t>( reinterpret_cast<int64_t*>(i_s_ptr2), reinterpret_cast<int64_t*>(i_s_ptr), n, false, 0, segment_bits); sort_postprocess_kernel<<< (n + 511) / 512, 512, 0, at::cuda::getCurrentCUDAStream()>>>( self_ptr, values_ptr, indices_ptr, i_s_ptr, nsegments, nsort); } template <typename scalar_t> void segmented_sort_pairs( int64_t nsegments, int64_t nsort, int64_t n, bool descending, const scalar_t* self_ptr, scalar_t* values_ptr, int64_t* indices_ptr) { const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto reverse_indices = cuda_allocator->allocate(numel * sizeof(int64_t)); int64_t* reverse_indices_ptr = static_cast<int64_t*>(reverse_indices.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::cuda::getCurrentCUDAStream(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); fill_reverse_indices_kernel<<<grid, block, 0, stream>>>( reverse_indices_ptr, numel, nsort_divider); at::cuda::cub::segmented_sort_pairs( self_ptr, values_ptr, reverse_indices_ptr, indices_ptr, n, nsegments, offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending); } } // namespace void launch_stable_sort_kernel( const TensorBase& self, int64_t dim, bool descending, const TensorBase& values, const TensorBase& indices) { const auto numel = self.numel(); if (numel == 0) { return; } int64_t numel_or_intmax = std::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max())); int64_t nsort = self.size(dim); int64_t nbatch = (numel_or_intmax / nsort) * nsort; TORCH_CHECK(nbatch > 0, "Cannot sort dimension of length ", nsort); int64_t* indices_ptr = indices.data_ptr<int64_t>(); #if (defined(USE_ROCM) && ROCM_VERSION < 40500) constexpr bool is_rocm_bf16_sort_unsupported = true; #else constexpr bool is_rocm_bf16_sort_unsupported = false; #endif AT_DISPATCH_ALL_TYPES_AND3( kBool, kHalf, kBFloat16, self.scalar_type(), "sort", [&] 
{ c10::guts::if_constexpr<!( is_rocm_bf16_sort_unsupported && std::is_same<scalar_t, c10::BFloat16>::value)>( [&](auto _) { const scalar_t* self_ptr = self.data_ptr<scalar_t>(); scalar_t* values_ptr = values.data_ptr<scalar_t>(); int64_t remaining = _(numel); while (remaining > 0) { int64_t n = std::min(remaining, nbatch); int64_t nsegments = n / nsort; if (nsegments == 1 || nsort >= 1000000) { // rough heuristics where even a single // sort occupies GPU segmented_sort_large_segments( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else if (nsegments < 128) { segmented_sort_pairs_by_full_sort( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else { segmented_sort_pairs( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } remaining -= n; self_ptr += n; values_ptr += n; indices_ptr += n; } }, [&](auto _) { TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm < 4.5"); }); }); } } // namespace native } // namespace at
acf5e64852590585bfdadfab77b779e676d30fce.hip
// !!! This is a file automatically generated by hipify!!! #include "BufferCompaction.h" #include "GpuMemUtils.h" #include "GpuRtConstants.h" #include "ResultSetBufferAccessors.h" #include "ResultSetSortImpl.h" #include "SortUtils.cuh" #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #define FORCE_CPU_VERSION #include "BufferEntryUtils.h" #undef FORCE_CPU_VERSION namespace { template <class K, class V, class I> std::vector<uint32_t> do_radix_sort(const ExecutorDeviceType device_type, ThrustAllocator& thrust_allocator, const int8_t* groupby_buffer, V dev_oe_col_buffer_begin, V dev_oe_col_buffer_end, I dev_idx_buff_begin, const size_t dev_idx_buff_size, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n) { if (dev_idx_buff_size == 0) { return {}; } if (oe.is_desc) { if (device_type == ExecutorDeviceType::GPU) { thrust::sort_by_key(thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>()); } else { thrust::sort_by_key(dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>()); } } else { if (device_type == ExecutorDeviceType::GPU) { thrust::sort_by_key(thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); } else { thrust::sort_by_key( dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); } } // Speculatively transfer only the top_n first, most of the time it'll be enough. thrust::host_vector<uint32_t> host_vector_result( dev_idx_buff_begin, dev_idx_buff_begin + ::min(top_n, dev_idx_buff_size)); // Sometimes, radix sort can bring to the front entries which are empty. // For example, ascending sort on COUNT(*) will bring non-existent groups // to the front of dev_idx_buff since they're 0 in our system. 
Re-do the // transfer in that case to bring the entire dev_idx_buff; existing logic // in row iteration will take care of skipping the empty rows. for (size_t i = 0; i < host_vector_result.size(); ++i) { const auto entry_idx = host_vector_result[i]; if (is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) { host_vector_result = thrust::host_vector<uint32_t>( dev_idx_buff_begin, dev_idx_buff_begin + dev_idx_buff_size); break; } } std::vector<uint32_t> result; result.reserve(::min(top_n, host_vector_result.size())); for (size_t i = 0; i < host_vector_result.size(); ++i) { const auto entry_idx = host_vector_result[i]; if (!is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) { result.push_back(entry_idx); if (result.size() >= top_n) { break; } } } return result; } void add_nulls(std::vector<uint32_t>& idx_buff, const std::vector<uint32_t>& null_idx_buff, const PodOrderEntry& oe) { if (null_idx_buff.empty()) { return; } const auto insertion_point = oe.nulls_first ? idx_buff.begin() : idx_buff.end(); idx_buff.insert(insertion_point, null_idx_buff.begin(), null_idx_buff.end()); } template <typename T> thrust::device_ptr<T> get_device_copy_ptr(const thrust::host_vector<T>& host_vec, ThrustAllocator& thrust_allocator) { if (host_vec.empty()) { return thrust::device_ptr<T>(static_cast<T*>(nullptr)); } const auto host_vec_bytes = host_vec.size() * sizeof(T); T* dev_ptr = reinterpret_cast<T*>( thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes))); copy_to_gpu(thrust_allocator.getDataMgr(), reinterpret_cast<hipDeviceptr_t>(dev_ptr), &host_vec[0], host_vec_bytes, thrust_allocator.getDeviceId()); return thrust::device_ptr<T>(dev_ptr); } template <class K> std::vector<uint32_t> baseline_sort_fp(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const thrust::host_vector<int64_t>& oe_col_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t 
top_n, const size_t start, const size_t step) { thrust::host_vector<uint32_t> neg_idx_buff; thrust::host_vector<uint32_t> pos_idx_buff; std::vector<uint32_t> null_idx_buff; thrust::host_vector<int64_t> neg_oe_col_buffer; thrust::host_vector<int64_t> pos_oe_col_buffer; const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 1 : 0); neg_idx_buff.reserve(slice_entry_count); pos_idx_buff.reserve(slice_entry_count); null_idx_buff.reserve(slice_entry_count); neg_oe_col_buffer.reserve(slice_entry_count); pos_oe_col_buffer.reserve(slice_entry_count); size_t oe_col_buffer_idx = 0; const auto& oe_info = layout.oe_target_info; const auto col_ti = oe_info.agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false) : oe_info.sql_type; // Execlude AVG b/c collect_order_entry_column already makes its pair collapse into a // double const bool float_argument_input = takes_float_argument(oe_info) && oe_info.agg_kind != kAVG; auto is_negative = float_argument_input ? [](const int64_t v) -> bool { return (v & (1 << 31)) != 0; } : [](const int64_t v) -> bool { return v < 0; }; for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) && oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(col_ti, float_argument_input)) { null_idx_buff.push_back(i); continue; } if (is_negative(oe_col_buffer[oe_col_buffer_idx])) { // sign bit works the same for // integer and floating point neg_idx_buff.push_back(i); neg_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } else { pos_idx_buff.push_back(i); pos_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } } std::vector<uint32_t> pos_result; ThrustAllocator thrust_allocator(data_mgr, device_id); if (device_type == ExecutorDeviceType::GPU) { const auto dev_pos_idx_buff = get_device_copy_ptr(pos_idx_buff, thrust_allocator); const auto dev_pos_oe_col_buffer = get_device_copy_ptr(pos_oe_col_buffer, thrust_allocator); pos_result = 
do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_pos_oe_col_buffer, dev_pos_oe_col_buffer + pos_oe_col_buffer.size(), dev_pos_idx_buff, pos_idx_buff.size(), oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); pos_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, pos_oe_col_buffer.begin(), pos_oe_col_buffer.end(), pos_idx_buff.begin(), pos_idx_buff.size(), oe, layout, top_n); } std::vector<uint32_t> neg_result; PodOrderEntry reverse_oe{oe.tle_no, !oe.is_desc, oe.nulls_first}; if (device_type == ExecutorDeviceType::GPU) { const auto dev_neg_idx_buff = get_device_copy_ptr(neg_idx_buff, thrust_allocator); const auto dev_neg_oe_col_buffer = get_device_copy_ptr(neg_oe_col_buffer, thrust_allocator); neg_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_neg_oe_col_buffer, dev_neg_oe_col_buffer + neg_oe_col_buffer.size(), dev_neg_idx_buff, neg_idx_buff.size(), reverse_oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); neg_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, neg_oe_col_buffer.begin(), neg_oe_col_buffer.end(), neg_idx_buff.begin(), neg_idx_buff.size(), reverse_oe, layout, top_n); } if (oe.is_desc) { pos_result.insert(pos_result.end(), neg_result.begin(), neg_result.end()); add_nulls(pos_result, null_idx_buff, oe); return pos_result; } neg_result.insert(neg_result.end(), pos_result.begin(), pos_result.end()); add_nulls(neg_result, null_idx_buff, oe); return neg_result; } template <class K> std::vector<uint32_t> baseline_sort_int(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const thrust::host_vector<int64_t>& oe_col_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { const auto& entry_ti = get_compact_type(layout.oe_target_info); std::vector<uint32_t> null_idx_buff; 
thrust::host_vector<uint32_t> notnull_idx_buff; const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 1 : 0); null_idx_buff.reserve(slice_entry_count); notnull_idx_buff.reserve(slice_entry_count); thrust::host_vector<int64_t> notnull_oe_col_buffer; notnull_oe_col_buffer.reserve(slice_entry_count); size_t oe_col_buffer_idx = 0; for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) && oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(entry_ti, false)) { null_idx_buff.push_back(i); } else { notnull_idx_buff.push_back(i); notnull_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } } std::vector<uint32_t> notnull_result; ThrustAllocator thrust_allocator(data_mgr, device_id); if (device_type == ExecutorDeviceType::GPU) { const auto dev_notnull_idx_buff = get_device_copy_ptr(notnull_idx_buff, thrust_allocator); const auto dev_notnull_oe_col_buffer = get_device_copy_ptr(notnull_oe_col_buffer, thrust_allocator); notnull_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_notnull_oe_col_buffer, dev_notnull_oe_col_buffer + notnull_oe_col_buffer.size(), dev_notnull_idx_buff, notnull_idx_buff.size(), oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); notnull_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, notnull_oe_col_buffer.begin(), notnull_oe_col_buffer.end(), notnull_idx_buff.begin(), notnull_idx_buff.size(), oe, layout, top_n); } add_nulls(notnull_result, null_idx_buff, oe); return notnull_result; } template <class K> thrust::host_vector<int64_t> collect_order_entry_column( const int8_t* groupby_buffer, const GroupByBufferLayoutInfo& layout, const size_t start, const size_t step) { thrust::host_vector<int64_t> oe_col_buffer; const auto row_ptr = groupby_buffer + start * layout.row_bytes; auto crt_group_ptr1 = layout.target_groupby_index >= 0 ? 
row_ptr + layout.target_groupby_index * sizeof(K) : row_ptr + layout.col_off; const int8_t* crt_group_ptr2{nullptr}; if (layout.oe_target_info.agg_kind == kAVG) { crt_group_ptr2 = crt_group_ptr1 + layout.col_bytes; } const auto entry_ti = get_compact_type(layout.oe_target_info); const bool float_argument_input = takes_float_argument(layout.oe_target_info); const auto step_bytes = layout.row_bytes * step; const auto col_bytes = float_argument_input ? entry_ti.get_size() : layout.col_bytes; for (size_t i = start; i < layout.entry_count; i += step) { auto val1 = read_int_from_buff(crt_group_ptr1, col_bytes > 0 ? col_bytes : sizeof(K)); if (crt_group_ptr2) { const auto val2 = read_int_from_buff(crt_group_ptr2, 8); const auto avg_val = pair_to_double({val1, val2}, entry_ti, float_argument_input); val1 = *reinterpret_cast<const int64_t*>(&avg_val); } oe_col_buffer.push_back(val1); crt_group_ptr1 += step_bytes; if (crt_group_ptr2) { crt_group_ptr2 += step_bytes; } } return oe_col_buffer; } } // namespace template <class K> std::vector<uint32_t> baseline_sort(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { auto oe_col_buffer = collect_order_entry_column<K>(groupby_buffer, layout, start, step); const auto& entry_ti = get_compact_type(layout.oe_target_info); CHECK(entry_ti.is_number()); if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) { return baseline_sort_fp<K>(device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); } // Because of how we represent nulls for integral types, they'd be at the // wrong position in these two cases. Separate them into a different buffer. 
if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) { return baseline_sort_int<K>(device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); } ThrustAllocator thrust_allocator(data_mgr, device_id); // Fastest path, no need to separate nulls away since they'll end up at the // right place as a side effect of how we're representing nulls. if (device_type == ExecutorDeviceType::GPU) { if (oe_col_buffer.empty()) { return {}; } const auto dev_idx_buff = get_device_ptr<uint32_t>(oe_col_buffer.size(), thrust_allocator); thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step); const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator); return do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_oe_col_buffer, dev_oe_col_buffer + oe_col_buffer.size(), dev_idx_buff, oe_col_buffer.size(), oe, layout, top_n); } CHECK(device_type == ExecutorDeviceType::CPU); thrust::host_vector<uint32_t> host_idx_buff(oe_col_buffer.size()); thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step); return do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, oe_col_buffer.begin(), oe_col_buffer.end(), host_idx_buff.begin(), host_idx_buff.size(), oe, layout, top_n); } template std::vector<uint32_t> baseline_sort<int32_t>( const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step); template std::vector<uint32_t> baseline_sort<int64_t>( const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step);
acf5e64852590585bfdadfab77b779e676d30fce.cu
#include "BufferCompaction.h" #include "GpuMemUtils.h" #include "GpuRtConstants.h" #include "ResultSetBufferAccessors.h" #include "ResultSetSortImpl.h" #include "SortUtils.cuh" #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #define FORCE_CPU_VERSION #include "BufferEntryUtils.h" #undef FORCE_CPU_VERSION namespace { template <class K, class V, class I> std::vector<uint32_t> do_radix_sort(const ExecutorDeviceType device_type, ThrustAllocator& thrust_allocator, const int8_t* groupby_buffer, V dev_oe_col_buffer_begin, V dev_oe_col_buffer_end, I dev_idx_buff_begin, const size_t dev_idx_buff_size, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n) { if (dev_idx_buff_size == 0) { return {}; } if (oe.is_desc) { if (device_type == ExecutorDeviceType::GPU) { thrust::sort_by_key(thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>()); } else { thrust::sort_by_key(dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>()); } } else { if (device_type == ExecutorDeviceType::GPU) { thrust::sort_by_key(thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); } else { thrust::sort_by_key( dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); } } // Speculatively transfer only the top_n first, most of the time it'll be enough. thrust::host_vector<uint32_t> host_vector_result( dev_idx_buff_begin, dev_idx_buff_begin + std::min(top_n, dev_idx_buff_size)); // Sometimes, radix sort can bring to the front entries which are empty. // For example, ascending sort on COUNT(*) will bring non-existent groups // to the front of dev_idx_buff since they're 0 in our system. Re-do the // transfer in that case to bring the entire dev_idx_buff; existing logic // in row iteration will take care of skipping the empty rows. 
for (size_t i = 0; i < host_vector_result.size(); ++i) { const auto entry_idx = host_vector_result[i]; if (is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) { host_vector_result = thrust::host_vector<uint32_t>( dev_idx_buff_begin, dev_idx_buff_begin + dev_idx_buff_size); break; } } std::vector<uint32_t> result; result.reserve(std::min(top_n, host_vector_result.size())); for (size_t i = 0; i < host_vector_result.size(); ++i) { const auto entry_idx = host_vector_result[i]; if (!is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) { result.push_back(entry_idx); if (result.size() >= top_n) { break; } } } return result; } void add_nulls(std::vector<uint32_t>& idx_buff, const std::vector<uint32_t>& null_idx_buff, const PodOrderEntry& oe) { if (null_idx_buff.empty()) { return; } const auto insertion_point = oe.nulls_first ? idx_buff.begin() : idx_buff.end(); idx_buff.insert(insertion_point, null_idx_buff.begin(), null_idx_buff.end()); } template <typename T> thrust::device_ptr<T> get_device_copy_ptr(const thrust::host_vector<T>& host_vec, ThrustAllocator& thrust_allocator) { if (host_vec.empty()) { return thrust::device_ptr<T>(static_cast<T*>(nullptr)); } const auto host_vec_bytes = host_vec.size() * sizeof(T); T* dev_ptr = reinterpret_cast<T*>( thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes))); copy_to_gpu(thrust_allocator.getDataMgr(), reinterpret_cast<CUdeviceptr>(dev_ptr), &host_vec[0], host_vec_bytes, thrust_allocator.getDeviceId()); return thrust::device_ptr<T>(dev_ptr); } template <class K> std::vector<uint32_t> baseline_sort_fp(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const thrust::host_vector<int64_t>& oe_col_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { thrust::host_vector<uint32_t> neg_idx_buff; thrust::host_vector<uint32_t> pos_idx_buff; 
std::vector<uint32_t> null_idx_buff; thrust::host_vector<int64_t> neg_oe_col_buffer; thrust::host_vector<int64_t> pos_oe_col_buffer; const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 1 : 0); neg_idx_buff.reserve(slice_entry_count); pos_idx_buff.reserve(slice_entry_count); null_idx_buff.reserve(slice_entry_count); neg_oe_col_buffer.reserve(slice_entry_count); pos_oe_col_buffer.reserve(slice_entry_count); size_t oe_col_buffer_idx = 0; const auto& oe_info = layout.oe_target_info; const auto col_ti = oe_info.agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false) : oe_info.sql_type; // Execlude AVG b/c collect_order_entry_column already makes its pair collapse into a // double const bool float_argument_input = takes_float_argument(oe_info) && oe_info.agg_kind != kAVG; auto is_negative = float_argument_input ? [](const int64_t v) -> bool { return (v & (1 << 31)) != 0; } : [](const int64_t v) -> bool { return v < 0; }; for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) && oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(col_ti, float_argument_input)) { null_idx_buff.push_back(i); continue; } if (is_negative(oe_col_buffer[oe_col_buffer_idx])) { // sign bit works the same for // integer and floating point neg_idx_buff.push_back(i); neg_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } else { pos_idx_buff.push_back(i); pos_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } } std::vector<uint32_t> pos_result; ThrustAllocator thrust_allocator(data_mgr, device_id); if (device_type == ExecutorDeviceType::GPU) { const auto dev_pos_idx_buff = get_device_copy_ptr(pos_idx_buff, thrust_allocator); const auto dev_pos_oe_col_buffer = get_device_copy_ptr(pos_oe_col_buffer, thrust_allocator); pos_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_pos_oe_col_buffer, dev_pos_oe_col_buffer + pos_oe_col_buffer.size(), 
dev_pos_idx_buff, pos_idx_buff.size(), oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); pos_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, pos_oe_col_buffer.begin(), pos_oe_col_buffer.end(), pos_idx_buff.begin(), pos_idx_buff.size(), oe, layout, top_n); } std::vector<uint32_t> neg_result; PodOrderEntry reverse_oe{oe.tle_no, !oe.is_desc, oe.nulls_first}; if (device_type == ExecutorDeviceType::GPU) { const auto dev_neg_idx_buff = get_device_copy_ptr(neg_idx_buff, thrust_allocator); const auto dev_neg_oe_col_buffer = get_device_copy_ptr(neg_oe_col_buffer, thrust_allocator); neg_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_neg_oe_col_buffer, dev_neg_oe_col_buffer + neg_oe_col_buffer.size(), dev_neg_idx_buff, neg_idx_buff.size(), reverse_oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); neg_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, neg_oe_col_buffer.begin(), neg_oe_col_buffer.end(), neg_idx_buff.begin(), neg_idx_buff.size(), reverse_oe, layout, top_n); } if (oe.is_desc) { pos_result.insert(pos_result.end(), neg_result.begin(), neg_result.end()); add_nulls(pos_result, null_idx_buff, oe); return pos_result; } neg_result.insert(neg_result.end(), pos_result.begin(), pos_result.end()); add_nulls(neg_result, null_idx_buff, oe); return neg_result; } template <class K> std::vector<uint32_t> baseline_sort_int(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const thrust::host_vector<int64_t>& oe_col_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { const auto& entry_ti = get_compact_type(layout.oe_target_info); std::vector<uint32_t> null_idx_buff; thrust::host_vector<uint32_t> notnull_idx_buff; const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 
1 : 0); null_idx_buff.reserve(slice_entry_count); notnull_idx_buff.reserve(slice_entry_count); thrust::host_vector<int64_t> notnull_oe_col_buffer; notnull_oe_col_buffer.reserve(slice_entry_count); size_t oe_col_buffer_idx = 0; for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) && oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(entry_ti, false)) { null_idx_buff.push_back(i); } else { notnull_idx_buff.push_back(i); notnull_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } } std::vector<uint32_t> notnull_result; ThrustAllocator thrust_allocator(data_mgr, device_id); if (device_type == ExecutorDeviceType::GPU) { const auto dev_notnull_idx_buff = get_device_copy_ptr(notnull_idx_buff, thrust_allocator); const auto dev_notnull_oe_col_buffer = get_device_copy_ptr(notnull_oe_col_buffer, thrust_allocator); notnull_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_notnull_oe_col_buffer, dev_notnull_oe_col_buffer + notnull_oe_col_buffer.size(), dev_notnull_idx_buff, notnull_idx_buff.size(), oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); notnull_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, notnull_oe_col_buffer.begin(), notnull_oe_col_buffer.end(), notnull_idx_buff.begin(), notnull_idx_buff.size(), oe, layout, top_n); } add_nulls(notnull_result, null_idx_buff, oe); return notnull_result; } template <class K> thrust::host_vector<int64_t> collect_order_entry_column( const int8_t* groupby_buffer, const GroupByBufferLayoutInfo& layout, const size_t start, const size_t step) { thrust::host_vector<int64_t> oe_col_buffer; const auto row_ptr = groupby_buffer + start * layout.row_bytes; auto crt_group_ptr1 = layout.target_groupby_index >= 0 ? 
row_ptr + layout.target_groupby_index * sizeof(K) : row_ptr + layout.col_off; const int8_t* crt_group_ptr2{nullptr}; if (layout.oe_target_info.agg_kind == kAVG) { crt_group_ptr2 = crt_group_ptr1 + layout.col_bytes; } const auto entry_ti = get_compact_type(layout.oe_target_info); const bool float_argument_input = takes_float_argument(layout.oe_target_info); const auto step_bytes = layout.row_bytes * step; const auto col_bytes = float_argument_input ? entry_ti.get_size() : layout.col_bytes; for (size_t i = start; i < layout.entry_count; i += step) { auto val1 = read_int_from_buff(crt_group_ptr1, col_bytes > 0 ? col_bytes : sizeof(K)); if (crt_group_ptr2) { const auto val2 = read_int_from_buff(crt_group_ptr2, 8); const auto avg_val = pair_to_double({val1, val2}, entry_ti, float_argument_input); val1 = *reinterpret_cast<const int64_t*>(&avg_val); } oe_col_buffer.push_back(val1); crt_group_ptr1 += step_bytes; if (crt_group_ptr2) { crt_group_ptr2 += step_bytes; } } return oe_col_buffer; } } // namespace template <class K> std::vector<uint32_t> baseline_sort(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { auto oe_col_buffer = collect_order_entry_column<K>(groupby_buffer, layout, start, step); const auto& entry_ti = get_compact_type(layout.oe_target_info); CHECK(entry_ti.is_number()); if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) { return baseline_sort_fp<K>(device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); } // Because of how we represent nulls for integral types, they'd be at the // wrong position in these two cases. Separate them into a different buffer. 
if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) { return baseline_sort_int<K>(device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); } ThrustAllocator thrust_allocator(data_mgr, device_id); // Fastest path, no need to separate nulls away since they'll end up at the // right place as a side effect of how we're representing nulls. if (device_type == ExecutorDeviceType::GPU) { if (oe_col_buffer.empty()) { return {}; } const auto dev_idx_buff = get_device_ptr<uint32_t>(oe_col_buffer.size(), thrust_allocator); thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step); const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator); return do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_oe_col_buffer, dev_oe_col_buffer + oe_col_buffer.size(), dev_idx_buff, oe_col_buffer.size(), oe, layout, top_n); } CHECK(device_type == ExecutorDeviceType::CPU); thrust::host_vector<uint32_t> host_idx_buff(oe_col_buffer.size()); thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step); return do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, oe_col_buffer.begin(), oe_col_buffer.end(), host_idx_buff.begin(), host_idx_buff.size(), oe, layout, top_n); } template std::vector<uint32_t> baseline_sort<int32_t>( const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step); template std::vector<uint32_t> baseline_sort<int64_t>( const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step);
06ac9353b2e360befc84e29627cf50dcd18a81f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma (iuriish@yahoo.com) // @author Oleh Semeniv (oleg.semeniv@gmail.com) // #include <system/op_boilerplate.h> #include <ops/declarable/helpers/imagesHelpers.h> #include <helpers/ConstantTadHelper.h> #include <ops/declarable/helpers/adjust_hue.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ void rgbToYuvCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; 
rgbYuv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> linkage void rgbToYuvCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { rgbToYuvCuda<T> << <blocksPerGrid, threadsPerBlock, 256, * stream >> > (vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, dimC); } /////////////////////////////////////////////////////////////////// void transformRgbYuv(sd::LaunchContext* context, const NDArray& input, NDArray& output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), { dimC }); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), { dimC }); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "yuv_to_rgb"); NDArray::prepareSpecialUse({ &output }, { &input }); BUILD_SINGLE_SELECTOR(input.dataType(), rgbToYuvCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), packX.platformOffsets(), output.specialBuffer(), output.specialShapeInfo(), packZ.platformOffsets(), numOfTads, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({ &output }, { &input }); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ void yuvToRgbCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { const T* x = 
reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; yuvRgb<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> linkage void yuvToRgbCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { yuvToRgbCuda<T> << <blocksPerGrid, threadsPerBlock, 256, * stream >> > (vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, dimC); } /////////////////////////////////////////////////////////////////// void transformYuvRgb(sd::LaunchContext* context, const NDArray& input, NDArray& output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), { dimC }); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), { dimC }); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "yuv_to_rgb"); NDArray::prepareSpecialUse({ &output }, { &input }); BUILD_SINGLE_SELECTOR(input.dataType(), yuvToRgbCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), packX.platformOffsets(), 
output.specialBuffer(), output.specialShapeInfo(), packZ.platformOffsets(), numOfTads, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({ &output }, { &input }); manager.synchronize(); } /////////////////////////////////////////////////////////////////// // for example xShapeInfo = {2,3,4}, zShapeInfo = {2,1,4} template<typename T> __global__ void rgbToGrsCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int dimC) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong zLen; __shared__ int rank, *sharedMem; // xRank == zRank if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<int*>(shmem); zLen = shape::length(zShapeInfo); rank = shape::rank(zShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x) { if (dimC == (rank - 1) && 'c' == shape::order(xShapeInfo) && 1 == shape::elementWiseStride(xShapeInfo) && 'c' == shape::order(zShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo)) { const auto xStep = i*3; z[i] = 0.2989f * x[xStep] + 0.5870f * x[xStep + 1] + 0.1140f * x[xStep + 2]; } else { shape::index2coords(i, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); const auto xOffset0 = shape::getOffset(xShapeInfo, coords); const auto xOffset1 = xOffset0 + shape::stride(xShapeInfo)[dimC]; const auto xOffset2 = xOffset1 + shape::stride(xShapeInfo)[dimC]; z[zOffset] = 0.2989f * x[xOffset0] + 0.5870f * x[xOffset1] + 0.1140f * x[xOffset2]; } } } /////////////////////////////////////////////////////////////////// template<typename T> linkage void rgbToGrsCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int dimC) { hipLaunchKernelGGL(( 
rgbToGrsCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, dimC); } /////////////////////////////////////////////////////////////////// void transformRgbGrs(sd::LaunchContext* context, const NDArray& input, NDArray& output, const int dimC) { PointersManager manager(context, "rgbToGrs"); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = input.rankOf() * sizeof(int) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), rgbToGrsCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), dimC), NUMERIC_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template <typename T> static void _CUDA_G rgbToHsvCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; rgbToHsv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template <typename T> static void _CUDA_G hsvToRgbCuda(const void* 
vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; hsvToRgb<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> static _CUDA_H void hsvToRgbCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { hipLaunchKernelGGL(( hsvToRgbCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, dimC); } template<typename T> static _CUDA_H void rgbToHsvCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { hipLaunchKernelGGL(( rgbToHsvCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, dimC); } /////////////////////////////////////////////////////////////////// void transformHsvRgb(sd::LaunchContext* context, const NDArray* 
input, NDArray* output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {dimC}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {dimC}); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "hsv_to_rgb"); NDArray::prepareSpecialUse({output}, {input}); BUILD_SINGLE_SELECTOR(input->dataType(), hsvToRgbCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// void transformRgbHsv(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {dimC}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {dimC}); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "rgb_to_hsv"); NDArray::prepareSpecialUse({output}, {input}); BUILD_SINGLE_SELECTOR(input->dataType(), rgbToHsvCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input}); manager.synchronize(); } template<typename T> __global__ void tripleTransformerCuda(const void *vx, const Nd4jLong 
*xShapeInfo, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xOffsets, void *vz, const Nd4jLong *zShapeInfo, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zOffsets, const int dimC, int mode, uint64_t numTads) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong zLen, *sharedMem; __shared__ int rank; // xRank == zRank float yiqarr[3][3] = { { 0.299f, 0.59590059f, 0.2115f }, { 0.587f, -0.27455667f, -0.52273617f }, { 0.114f, -0.32134392f, 0.31119955f } }; float rgbarr[3][3] = { { 1.f, 1.f, 1.f }, { 0.95598634f, -0.27201283f, -1.10674021f }, { 0.6208248f, -0.64720424f, 1.70423049f } }; auto tr = mode == 1? yiqarr : rgbarr; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); rank = shape::rank(zShapeInfo); } __syncthreads(); Nd4jLong* coords = sharedMem + threadIdx.x * rank; if (dimC == (rank - 1) && 'c' == shape::order(xShapeInfo) && 1 == shape::elementWiseStride(xShapeInfo) && 'c' == shape::order(zShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo)) { for (uint64_t f = blockIdx.x * blockDim.x + threadIdx.x; f < zLen / 3; f += gridDim.x * blockDim.x) { auto i = f * 3; auto xi0 = x[i]; auto xi1 = x[i+1]; auto xi2 = x[i+2]; for (int e = 0; e < 3; e++) z[i + e] = xi0 * tr[0][e] + xi1 * tr[1][e] + xi2 * tr[2][e]; } } else { // TAD based case const Nd4jLong xDimCstride = shape::stride(xShapeInfo)[dimC]; const Nd4jLong zDimCstride = shape::stride(zShapeInfo)[dimC]; for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < numTads; i += blockDim.x * gridDim.x) { const T* xTad = x + xOffsets[i]; T* zTad = z + zOffsets[i]; auto xi0 = xTad[0]; auto xi1 = xTad[xDimCstride]; auto xi2 = xTad[xDimCstride * 2]; for (int e = 0; e < 3; e++) zTad[zDimCstride * e] = xi0 * tr[0][e] + xi1 * tr[1][e] + xi2 * tr[2][e]; } } } template <typename T> static void rgbYiq(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int 
dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimC); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimC); NDArray::prepareSpecialUse({output}, {input}); returnhipLaunchKernelGGL(( tripleTransformerCuda<T>), dim3(256), dim3(256), 8192, *context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformShapeInfo(), packZ.platformOffsets(), dimC, 1, packZ.numberOfTads()); NDArray::registerSpecialUse({output}, {input}); } template <typename T> FORCEINLINE static void yiqRgb(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimC); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimC); NDArray::prepareSpecialUse({output}, {input}); returnhipLaunchKernelGGL(( tripleTransformerCuda<T>), dim3(256), dim3(256), 8192, *context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformShapeInfo(), packZ.platformOffsets(), dimC, 2, packZ.numberOfTads()); NDArray::registerSpecialUse({output}, {input}); } void transformYiqRgb(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int dimC) { BUILD_SINGLE_SELECTOR(input->dataType(), yiqRgb, (context, input, output, dimC), FLOAT_TYPES); } void transformRgbYiq(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int dimC) { BUILD_SINGLE_SELECTOR(input->dataType(), rgbYiq, (context, input, output, dimC), FLOAT_TYPES); } } } }
06ac9353b2e360befc84e29627cf50dcd18a81f8.cu
/******************************************************************************* * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma (iuriish@yahoo.com) // @author Oleh Semeniv (oleg.semeniv@gmail.com) // #include <system/op_boilerplate.h> #include <ops/declarable/helpers/imagesHelpers.h> #include <helpers/ConstantTadHelper.h> #include <ops/declarable/helpers/adjust_hue.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ void rgbToYuvCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; rgbYuv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], zTad[0], zTad[zDimCstride], 
zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> linkage void rgbToYuvCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { rgbToYuvCuda<T> << <blocksPerGrid, threadsPerBlock, 256, * stream >> > (vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, dimC); } /////////////////////////////////////////////////////////////////// void transformRgbYuv(sd::LaunchContext* context, const NDArray& input, NDArray& output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), { dimC }); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), { dimC }); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "yuv_to_rgb"); NDArray::prepareSpecialUse({ &output }, { &input }); BUILD_SINGLE_SELECTOR(input.dataType(), rgbToYuvCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), packX.platformOffsets(), output.specialBuffer(), output.specialShapeInfo(), packZ.platformOffsets(), numOfTads, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({ &output }, { &input }); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ void yuvToRgbCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; 
__shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; yuvRgb<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> linkage void yuvToRgbCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { yuvToRgbCuda<T> << <blocksPerGrid, threadsPerBlock, 256, * stream >> > (vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, dimC); } /////////////////////////////////////////////////////////////////// void transformYuvRgb(sd::LaunchContext* context, const NDArray& input, NDArray& output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), { dimC }); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), { dimC }); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "yuv_to_rgb"); NDArray::prepareSpecialUse({ &output }, { &input }); BUILD_SINGLE_SELECTOR(input.dataType(), yuvToRgbCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), packX.platformOffsets(), output.specialBuffer(), output.specialShapeInfo(), packZ.platformOffsets(), numOfTads, dimC), 
FLOAT_TYPES); NDArray::registerSpecialUse({ &output }, { &input }); manager.synchronize(); } /////////////////////////////////////////////////////////////////// // for example xShapeInfo = {2,3,4}, zShapeInfo = {2,1,4} template<typename T> __global__ void rgbToGrsCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int dimC) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong zLen; __shared__ int rank, *sharedMem; // xRank == zRank if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<int*>(shmem); zLen = shape::length(zShapeInfo); rank = shape::rank(zShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x) { if (dimC == (rank - 1) && 'c' == shape::order(xShapeInfo) && 1 == shape::elementWiseStride(xShapeInfo) && 'c' == shape::order(zShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo)) { const auto xStep = i*3; z[i] = 0.2989f * x[xStep] + 0.5870f * x[xStep + 1] + 0.1140f * x[xStep + 2]; } else { shape::index2coords(i, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); const auto xOffset0 = shape::getOffset(xShapeInfo, coords); const auto xOffset1 = xOffset0 + shape::stride(xShapeInfo)[dimC]; const auto xOffset2 = xOffset1 + shape::stride(xShapeInfo)[dimC]; z[zOffset] = 0.2989f * x[xOffset0] + 0.5870f * x[xOffset1] + 0.1140f * x[xOffset2]; } } } /////////////////////////////////////////////////////////////////// template<typename T> linkage void rgbToGrsCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int dimC) { rgbToGrsCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, dimC); } 
/////////////////////////////////////////////////////////////////// void transformRgbGrs(sd::LaunchContext* context, const NDArray& input, NDArray& output, const int dimC) { PointersManager manager(context, "rgbToGrs"); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = input.rankOf() * sizeof(int) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), rgbToGrsCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), dimC), NUMERIC_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template <typename T> static void _CUDA_G rgbToHsvCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; rgbToHsv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template <typename T> static void _CUDA_G hsvToRgbCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* 
zTadOffsets, const Nd4jLong numOfTads, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; hsvToRgb<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> static _CUDA_H void hsvToRgbCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { hsvToRgbCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, dimC); } template<typename T> static _CUDA_H void rgbToHsvCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const int dimC) { rgbToHsvCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, dimC); } /////////////////////////////////////////////////////////////////// void transformHsvRgb(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {dimC}); auto packZ = 
sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {dimC}); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "hsv_to_rgb"); NDArray::prepareSpecialUse({output}, {input}); BUILD_SINGLE_SELECTOR(input->dataType(), hsvToRgbCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// void transformRgbHsv(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {dimC}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {dimC}); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "rgb_to_hsv"); NDArray::prepareSpecialUse({output}, {input}); BUILD_SINGLE_SELECTOR(input->dataType(), rgbToHsvCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input}); manager.synchronize(); } template<typename T> __global__ void tripleTransformerCuda(const void *vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xOffsets, void *vz, const Nd4jLong *zShapeInfo, const Nd4jLong *zTadShapeInfo, const 
Nd4jLong *zOffsets, const int dimC, int mode, uint64_t numTads) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong zLen, *sharedMem; __shared__ int rank; // xRank == zRank float yiqarr[3][3] = { { 0.299f, 0.59590059f, 0.2115f }, { 0.587f, -0.27455667f, -0.52273617f }, { 0.114f, -0.32134392f, 0.31119955f } }; float rgbarr[3][3] = { { 1.f, 1.f, 1.f }, { 0.95598634f, -0.27201283f, -1.10674021f }, { 0.6208248f, -0.64720424f, 1.70423049f } }; auto tr = mode == 1? yiqarr : rgbarr; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); rank = shape::rank(zShapeInfo); } __syncthreads(); Nd4jLong* coords = sharedMem + threadIdx.x * rank; if (dimC == (rank - 1) && 'c' == shape::order(xShapeInfo) && 1 == shape::elementWiseStride(xShapeInfo) && 'c' == shape::order(zShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo)) { for (uint64_t f = blockIdx.x * blockDim.x + threadIdx.x; f < zLen / 3; f += gridDim.x * blockDim.x) { auto i = f * 3; auto xi0 = x[i]; auto xi1 = x[i+1]; auto xi2 = x[i+2]; for (int e = 0; e < 3; e++) z[i + e] = xi0 * tr[0][e] + xi1 * tr[1][e] + xi2 * tr[2][e]; } } else { // TAD based case const Nd4jLong xDimCstride = shape::stride(xShapeInfo)[dimC]; const Nd4jLong zDimCstride = shape::stride(zShapeInfo)[dimC]; for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < numTads; i += blockDim.x * gridDim.x) { const T* xTad = x + xOffsets[i]; T* zTad = z + zOffsets[i]; auto xi0 = xTad[0]; auto xi1 = xTad[xDimCstride]; auto xi2 = xTad[xDimCstride * 2]; for (int e = 0; e < 3; e++) zTad[zDimCstride * e] = xi0 * tr[0][e] + xi1 * tr[1][e] + xi2 * tr[2][e]; } } } template <typename T> static void rgbYiq(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimC); auto packZ = 
sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimC); NDArray::prepareSpecialUse({output}, {input}); return tripleTransformerCuda<T><<<256, 256, 8192, *context->getCudaStream()>>>(input->specialBuffer(), input->specialShapeInfo(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformShapeInfo(), packZ.platformOffsets(), dimC, 1, packZ.numberOfTads()); NDArray::registerSpecialUse({output}, {input}); } template <typename T> FORCEINLINE static void yiqRgb(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimC); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimC); NDArray::prepareSpecialUse({output}, {input}); return tripleTransformerCuda<T><<<256, 256, 8192, *context->getCudaStream()>>>(input->specialBuffer(), input->specialShapeInfo(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformShapeInfo(), packZ.platformOffsets(), dimC, 2, packZ.numberOfTads()); NDArray::registerSpecialUse({output}, {input}); } void transformYiqRgb(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int dimC) { BUILD_SINGLE_SELECTOR(input->dataType(), yiqRgb, (context, input, output, dimC), FLOAT_TYPES); } void transformRgbYiq(sd::LaunchContext* context, const NDArray* input, NDArray* output, const int dimC) { BUILD_SINGLE_SELECTOR(input->dataType(), rgbYiq, (context, input, output, dimC), FLOAT_TYPES); } } } }
515c11b9251d8c982c7f23b9b6a8b15750afa732.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "lite/backends/cuda/math/sequence_padding.h" #include "lite/core/op_registry.h" #include "lite/core/target_wrapper.h" #include "lite/kernels/cuda/sequence_pad_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { template <typename T, PrecisionType Ptype> void SequencePadCompute<T, Ptype>::Run() { auto& param = this->template Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); const auto* x = param.X; const auto* pad_value = param.PadValue; auto* out = param.Out; auto* len_t = param.Length; int seq_num = x->lod()[0].size() - 1; int padded_length; if (param.padded_length == -1) { int max_seq_len = 0; for (int i = 0; i < seq_num; ++i) { max_seq_len = ::max( max_seq_len, static_cast<int>(x->lod()[0][i + 1] - x->lod()[0][i])); } padded_length = max_seq_len; } else { padded_length = param.padded_length; } int max_seq_len = 0; int step_width = x->numel() / x->dims()[0]; // calc for param.Lenght seq_len_.resize(seq_num); seq_offsets_vec_.resize(x->lod()[0].size()); for (size_t i = 0; i < seq_num; ++i) { max_seq_len = ::max( max_seq_len, static_cast<int>(x->lod()[0][i + 1] - x->lod()[0][i])); seq_len_[i] = x->lod()[0][i + 1] - x->lod()[0][i]; seq_offsets_vec_[i] = x->lod()[0][i]; } seq_offsets_vec_[seq_num] = 
x->lod()[0][seq_num]; TargetWrapperCuda::MemcpyAsync( len_t->template mutable_data<int64_t>(TARGET(kCUDA)), seq_len_.data(), sizeof(int64_t) * seq_len_.size(), IoDirection::HtoD, stream); seq_offsets_.Resize({static_cast<int64_t>(x->lod()[0].size())}); TargetWrapperCuda::MemcpyAsync( seq_offsets_.mutable_data<size_t>(TARGET(kCUDA)), seq_offsets_vec_.data(), sizeof(size_t) * seq_offsets_vec_.size(), IoDirection::HtoD, stream); const T* seq_data = x->template data<T>(); T* pad_data = out->template mutable_data<T>(TARGET(kCUDA)); const T* pad_value_data = pad_value->template data<T>(); lite::cuda::math::SequencePadding(pad_data, seq_data, pad_value_data, pad_value->numel() == 1, seq_offsets_.data<size_t>(), seq_num, padded_length, step_width, &stream); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle using SeqPadFp32 = paddle::lite::kernels::cuda::SequencePadCompute<float, PRECISION(kFloat)>; using SeqPadFp16 = paddle::lite::kernels::cuda::SequencePadCompute<half, PRECISION(kFP16)>; REGISTER_LITE_KERNEL(sequence_pad, kCUDA, kFloat, kNCHW, SeqPadFp32, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindInput("PadValue", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Length", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt64))}) .Finalize(); REGISTER_LITE_KERNEL(sequence_pad, kCUDA, kFP16, kNCHW, SeqPadFp16, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))}) .BindInput("PadValue", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))}) .BindOutput("Length", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt64))}) .Finalize();
515c11b9251d8c982c7f23b9b6a8b15750afa732.cu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "lite/backends/cuda/math/sequence_padding.h" #include "lite/core/op_registry.h" #include "lite/core/target_wrapper.h" #include "lite/kernels/cuda/sequence_pad_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { template <typename T, PrecisionType Ptype> void SequencePadCompute<T, Ptype>::Run() { auto& param = this->template Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); const auto* x = param.X; const auto* pad_value = param.PadValue; auto* out = param.Out; auto* len_t = param.Length; int seq_num = x->lod()[0].size() - 1; int padded_length; if (param.padded_length == -1) { int max_seq_len = 0; for (int i = 0; i < seq_num; ++i) { max_seq_len = std::max( max_seq_len, static_cast<int>(x->lod()[0][i + 1] - x->lod()[0][i])); } padded_length = max_seq_len; } else { padded_length = param.padded_length; } int max_seq_len = 0; int step_width = x->numel() / x->dims()[0]; // calc for param.Lenght seq_len_.resize(seq_num); seq_offsets_vec_.resize(x->lod()[0].size()); for (size_t i = 0; i < seq_num; ++i) { max_seq_len = std::max( max_seq_len, static_cast<int>(x->lod()[0][i + 1] - x->lod()[0][i])); seq_len_[i] = x->lod()[0][i + 1] - x->lod()[0][i]; seq_offsets_vec_[i] = x->lod()[0][i]; } seq_offsets_vec_[seq_num] = x->lod()[0][seq_num]; TargetWrapperCuda::MemcpyAsync( 
len_t->template mutable_data<int64_t>(TARGET(kCUDA)), seq_len_.data(), sizeof(int64_t) * seq_len_.size(), IoDirection::HtoD, stream); seq_offsets_.Resize({static_cast<int64_t>(x->lod()[0].size())}); TargetWrapperCuda::MemcpyAsync( seq_offsets_.mutable_data<size_t>(TARGET(kCUDA)), seq_offsets_vec_.data(), sizeof(size_t) * seq_offsets_vec_.size(), IoDirection::HtoD, stream); const T* seq_data = x->template data<T>(); T* pad_data = out->template mutable_data<T>(TARGET(kCUDA)); const T* pad_value_data = pad_value->template data<T>(); lite::cuda::math::SequencePadding(pad_data, seq_data, pad_value_data, pad_value->numel() == 1, seq_offsets_.data<size_t>(), seq_num, padded_length, step_width, &stream); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle using SeqPadFp32 = paddle::lite::kernels::cuda::SequencePadCompute<float, PRECISION(kFloat)>; using SeqPadFp16 = paddle::lite::kernels::cuda::SequencePadCompute<half, PRECISION(kFP16)>; REGISTER_LITE_KERNEL(sequence_pad, kCUDA, kFloat, kNCHW, SeqPadFp32, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindInput("PadValue", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Length", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt64))}) .Finalize(); REGISTER_LITE_KERNEL(sequence_pad, kCUDA, kFP16, kNCHW, SeqPadFp16, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))}) .BindInput("PadValue", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))}) .BindOutput("Length", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt64))}) .Finalize();
8303ec62def0f6d7acc26fa04527c8a49a302d07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. 
// Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include <cstdio> #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
const int2 thread_in_2d = make_int2 (blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_in_1d = thread_in_2d.y * numCols + thread_in_2d.x; if (thread_in_2d.x >= numCols || thread_in_2d.y >= numRows) return; int r, c, ir, ic, itr, itc; float res = 0.0; ir = thread_in_2d.y - (filterWidth - 1) / 2; for (r = 0; r < filterWidth; r++, ir++) { ic = thread_in_2d.x - (filterWidth - 1) / 2; for (c = 0; c < filterWidth; c++, ic++) { itr = min (max (ir, 0), numRows - 1); itc = min (max (ic, 0), numCols - 1); res += filter[r * filterWidth + c] * static_cast<float>( inputChannel[itr * numCols + itc]); } } outputChannel[thread_in_1d] = res; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 thread_in_2d = make_int2 (blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_in_1d = thread_in_2d.y * numCols + thread_in_2d.x; if (thread_in_2d.x >= numCols || thread_in_2d.y >= numRows) return; uchar4 pix = inputImageRGBA[thread_in_1d]; redChannel[thread_in_1d] = pix.x; greenChannel[thread_in_1d] = pix.y; blueChannel[thread_in_1d] = pix.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc size_t numFilterBytes = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(hipMalloc(&d_filter, numFilterBytes)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. 
hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, numFilterBytes, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { #ifndef DEBUG const int nRt = 32; // min ((int) numRows, 1024); const int nCt = 32; // (int) floor (1024.0 / (float) nRt); const int nRg = (int) ceil ((float) numRows / (float) nRt); const int nCg = (int) ceil ((float) numCols / (float) nCt); //Set reasonable block size (i.e., number of threads per block) const dim3 blockSize (nCt, nRt); //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize (nCg, nRg); #else const dim3 blockSize (10, 10); const dim3 gridSize (1, 1); #endif printf ("# grids (%d,%d) # threads (%d,%d)\n", gridSize.x, gridSize.y, blockSize.x, blockSize.y); unsigned char *d_redTmp, *d_greenTmp, *d_blueTmp; const int bytes = numRows * numCols; // allocate space for temp arrays for each channel checkCudaErrors(hipMalloc(&d_redTmp, bytes)); checkCudaErrors(hipMalloc(&d_greenTmp, bytes)); checkCudaErrors(hipMalloc(&d_blueTmp, bytes)); // copy host input image to device input image checkCudaErrors(hipMemcpy(d_inputImageRGBA, h_inputImageRGBA, bytes * 3, hipMemcpyHostToDevice)); // Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_redTmp, d_greenTmp, d_blueTmp); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. 
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_redTmp, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_greenTmp, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blueTmp, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipFree(d_redTmp)); checkCudaErrors(hipFree(d_greenTmp)); checkCudaErrors(hipFree(d_blueTmp)); } //Free all the memory that we allocated // make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
8303ec62def0f6d7acc26fa04527c8a49a302d07.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. 
// // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. 
If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include <cstdio> #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
const int2 thread_in_2d = make_int2 (blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_in_1d = thread_in_2d.y * numCols + thread_in_2d.x; if (thread_in_2d.x >= numCols || thread_in_2d.y >= numRows) return; int r, c, ir, ic, itr, itc; float res = 0.0; ir = thread_in_2d.y - (filterWidth - 1) / 2; for (r = 0; r < filterWidth; r++, ir++) { ic = thread_in_2d.x - (filterWidth - 1) / 2; for (c = 0; c < filterWidth; c++, ic++) { itr = min (max (ir, 0), numRows - 1); itc = min (max (ic, 0), numCols - 1); res += filter[r * filterWidth + c] * static_cast<float>( inputChannel[itr * numCols + itc]); } } outputChannel[thread_in_1d] = res; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 thread_in_2d = make_int2 (blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_in_1d = thread_in_2d.y * numCols + thread_in_2d.x; if (thread_in_2d.x >= numCols || thread_in_2d.y >= numRows) return; uchar4 pix = inputImageRGBA[thread_in_1d]; redChannel[thread_in_1d] = pix.x; greenChannel[thread_in_1d] = pix.y; blueChannel[thread_in_1d] = pix.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc size_t numFilterBytes = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(cudaMalloc(&d_filter, numFilterBytes)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. 
cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, numFilterBytes, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { #ifndef DEBUG const int nRt = 32; // min ((int) numRows, 1024); const int nCt = 32; // (int) floor (1024.0 / (float) nRt); const int nRg = (int) ceil ((float) numRows / (float) nRt); const int nCg = (int) ceil ((float) numCols / (float) nCt); //Set reasonable block size (i.e., number of threads per block) const dim3 blockSize (nCt, nRt); //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize (nCg, nRg); #else const dim3 blockSize (10, 10); const dim3 gridSize (1, 1); #endif printf ("# grids (%d,%d) # threads (%d,%d)\n", gridSize.x, gridSize.y, blockSize.x, blockSize.y); unsigned char *d_redTmp, *d_greenTmp, *d_blueTmp; const int bytes = numRows * numCols; // allocate space for temp arrays for each channel checkCudaErrors(cudaMalloc(&d_redTmp, bytes)); checkCudaErrors(cudaMalloc(&d_greenTmp, bytes)); checkCudaErrors(cudaMalloc(&d_blueTmp, bytes)); // copy host input image to device input image checkCudaErrors(cudaMemcpy(d_inputImageRGBA, h_inputImageRGBA, bytes * 3, cudaMemcpyHostToDevice)); // Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>> (d_inputImageRGBA, numRows, numCols, d_redTmp, d_greenTmp, d_blueTmp); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. 
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize, blockSize>>> (d_redTmp, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>> (d_greenTmp, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>> (d_blueTmp, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(d_redTmp)); checkCudaErrors(cudaFree(d_greenTmp)); checkCudaErrors(cudaFree(d_blueTmp)); } //Free all the memory that we allocated // make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
5a293ee91662e2f04aec6ce29b1060220d8ccb37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../knet.h" template<typename dType> __global__ void _nce_grad_real(int n, dType *ypred, dType *kqvec, dType *ygrad) { int i = threadIdx.x + blockIdx.x * blockDim.x; while(i < n) { int ij = n*i+i; ygrad[ij] = -(kqvec[i]/(exp(ypred[ij]) + kqvec[i]))/n; i += blockDim.x * gridDim.x; } } extern "C" { void nce_grad_real_32(int n, float *ypred, float *kqvec, float *ygrad) KCALL(_nce_grad_real,n,ypred,kqvec,ygrad); void nce_grad_real_64(int n, double *ypred, double *kqvec, double *ygrad) KCALL(_nce_grad_real,n,ypred,kqvec,ygrad); } template<typename dType> __global__ void _nce_loss_real(int n, dType *ypred, dType *kqvec, dType *ytemp) { int i = threadIdx.x + blockIdx.x * blockDim.x; while(i < n) { dType s = ypred[n*i+i]; ytemp[i] = log(exp(s) + kqvec[i]) - s; i += blockDim.x * gridDim.x; } } extern "C" { void nce_loss_real_32(int n, float *ypred, float *kqvec, float *ytemp) KCALL(_nce_loss_real,n,ypred,kqvec,ytemp); void nce_loss_real_64(int n, double *ypred, double *kqvec, double *ytemp) KCALL(_nce_loss_real,n,ypred,kqvec,ytemp); } template<typename dType> __global__ void _nce_loss_noise(int K, int B, dType *ypred, dType *kqvec, dType *ytemp) { int kb = threadIdx.x + blockIdx.x * blockDim.x; int KB = K*B; while(kb < KB) { dType s = ypred[kb]; dType kq = kqvec[kb % K]; ytemp[kb] = -log(kq)+log(exp(s)+kq); kb += blockDim.x * gridDim.x; } } extern "C" { void nce_loss_noise_32(int K, int B, float *ypred, float *kqvec, float *ytemp) KCALL(_nce_loss_noise,K,B,ypred,kqvec,ytemp); void nce_loss_noise_64(int K, int B, double *ypred, double *kqvec, double *ytemp) KCALL(_nce_loss_noise,K,B,ypred,kqvec,ytemp); } template<typename dType> __global__ void _nce_grad_noise(int K, int B, dType *ypred, dType *kqvec, dType *ygrad) { int kb = threadIdx.x + blockIdx.x * blockDim.x; int KB = K*B; while(kb < KB) { dType exps = exp(ypred[kb]); dType kq = kqvec[kb % K]; ygrad[kb] = (exps/(exps+kq))/B; 
kb += blockDim.x * gridDim.x; } } extern "C" { void nce_grad_noise_32(int K, int B, float *ypred, float *kqvec, float *ygrad) KCALL(_nce_grad_noise,K,B,ypred,kqvec,ygrad); void nce_grad_noise_64(int K, int B, double *ypred, double *kqvec, double *ygrad) KCALL(_nce_grad_noise,K,B,ypred,kqvec,ygrad); }
5a293ee91662e2f04aec6ce29b1060220d8ccb37.cu
#include "../knet.h" template<typename dType> __global__ void _nce_grad_real(int n, dType *ypred, dType *kqvec, dType *ygrad) { int i = threadIdx.x + blockIdx.x * blockDim.x; while(i < n) { int ij = n*i+i; ygrad[ij] = -(kqvec[i]/(exp(ypred[ij]) + kqvec[i]))/n; i += blockDim.x * gridDim.x; } } extern "C" { void nce_grad_real_32(int n, float *ypred, float *kqvec, float *ygrad) KCALL(_nce_grad_real,n,ypred,kqvec,ygrad); void nce_grad_real_64(int n, double *ypred, double *kqvec, double *ygrad) KCALL(_nce_grad_real,n,ypred,kqvec,ygrad); } template<typename dType> __global__ void _nce_loss_real(int n, dType *ypred, dType *kqvec, dType *ytemp) { int i = threadIdx.x + blockIdx.x * blockDim.x; while(i < n) { dType s = ypred[n*i+i]; ytemp[i] = log(exp(s) + kqvec[i]) - s; i += blockDim.x * gridDim.x; } } extern "C" { void nce_loss_real_32(int n, float *ypred, float *kqvec, float *ytemp) KCALL(_nce_loss_real,n,ypred,kqvec,ytemp); void nce_loss_real_64(int n, double *ypred, double *kqvec, double *ytemp) KCALL(_nce_loss_real,n,ypred,kqvec,ytemp); } template<typename dType> __global__ void _nce_loss_noise(int K, int B, dType *ypred, dType *kqvec, dType *ytemp) { int kb = threadIdx.x + blockIdx.x * blockDim.x; int KB = K*B; while(kb < KB) { dType s = ypred[kb]; dType kq = kqvec[kb % K]; ytemp[kb] = -log(kq)+log(exp(s)+kq); kb += blockDim.x * gridDim.x; } } extern "C" { void nce_loss_noise_32(int K, int B, float *ypred, float *kqvec, float *ytemp) KCALL(_nce_loss_noise,K,B,ypred,kqvec,ytemp); void nce_loss_noise_64(int K, int B, double *ypred, double *kqvec, double *ytemp) KCALL(_nce_loss_noise,K,B,ypred,kqvec,ytemp); } template<typename dType> __global__ void _nce_grad_noise(int K, int B, dType *ypred, dType *kqvec, dType *ygrad) { int kb = threadIdx.x + blockIdx.x * blockDim.x; int KB = K*B; while(kb < KB) { dType exps = exp(ypred[kb]); dType kq = kqvec[kb % K]; ygrad[kb] = (exps/(exps+kq))/B; kb += blockDim.x * gridDim.x; } } extern "C" { void nce_grad_noise_32(int K, int B, 
float *ypred, float *kqvec, float *ygrad) KCALL(_nce_grad_noise,K,B,ypred,kqvec,ygrad); void nce_grad_noise_64(int K, int B, double *ypred, double *kqvec, double *ygrad) KCALL(_nce_grad_noise,K,B,ypred,kqvec,ygrad); }
6cfa4d37fb30fbb3b55cceabc240da3152ccfc58.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "psrdada_cpp/meerkat/fbfuse/WeightsManager.cuh" #include "psrdada_cpp/meerkat/fbfuse/PipelineConfig.hpp" #include "psrdada_cpp/meerkat/fbfuse/DelayManager.cuh" #include "psrdada_cpp/cuda_utils.hpp" #include <thrust/device_vector.h> #define TWOPI 6.283185307179586f namespace psrdada_cpp { namespace meerkat { namespace fbfuse { namespace kernels { __global__ void generate_weights_k( float2 const * __restrict__ delay_models, char2 * __restrict__ weights, float const * __restrict__ channel_frequencies, int nantennas, int nbeams, int nchans, float tstart, float tstep, int ntsteps) { //for each loaded delay poly we can produce multiple epochs for one antenna, one beam, all frequencies and both pols //Different blocks should handle different beams (as antennas are on the inner dimension of the output product) //Basics of this kernel: // // gridDim.x is used for beams (there is a loop if you want to limit the grid size) // gridDim.y is used for channels (there is a loop if you want to limit the grid size) // blockDim.x is used for antennas (there is a loop if you want to limit the grid size) // // Time steps are handled in a the inner loop. As antennas are on the inner dimension of // both the input and the output array, all reads and writes should be coalesced. 
const int weights_per_beam = nantennas; const int weights_per_channel = weights_per_beam * nbeams; const int weights_per_time_step = weights_per_channel * nchans; float2 weight; char2 compressed_weight; //This isn't really needed as there will never be more than 64 antennas //However this makes this fucntion more flexible with smaller blocks for (int chan_idx = blockIdx.y; chan_idx < nchans; chan_idx += gridDim.y) { float frequency = channel_frequencies[chan_idx]; int chan_offset = chan_idx * weights_per_channel; // correct for (int beam_idx = blockIdx.x; beam_idx < nbeams; beam_idx += gridDim.x) { int beam_offset = chan_offset + beam_idx * weights_per_beam; // correct for (int antenna_idx = threadIdx.x; antenna_idx < nantennas; antenna_idx+=blockDim.x) { float2 delay_model = delay_models[beam_idx * nantennas + antenna_idx]; // correct int antenna_offset = beam_offset + antenna_idx; for (int time_idx = threadIdx.y; time_idx < ntsteps; time_idx+=blockDim.y) { //Calculates epoch offset float t = tstart + time_idx * tstep; float phase = (t * delay_model.x + delay_model.y) * frequency; //This is possible as the magnitude of the weight is 1 //If we ever have to implement scalar weightings, this //must change. 
__sincosf(TWOPI * phase, &weight.y, &weight.x); compressed_weight.x = (char) __float2int_rn(weight.x * 127.0f); compressed_weight.y = (char) __float2int_rn(weight.y * 127.0f); int output_idx = time_idx * weights_per_time_step + antenna_offset; weights[output_idx] = compressed_weight; } } } } } } //namespace kernels WeightsManager::WeightsManager(PipelineConfig const& config, hipStream_t stream) : _config(config) , _stream(stream) { std::size_t nbeams = _config.cb_nbeams(); std::size_t nantennas = _config.cb_nantennas(); BOOST_LOG_TRIVIAL(debug) << "Constructing WeightsManager instance to hold weights for " << nbeams << " beams and " << nantennas << " antennas"; _weights.resize(nbeams * nantennas * _config.nchans()); // This should be an implicit copy to the device BOOST_LOG_TRIVIAL(debug) << "Copying channel frequencies to the GPU"; _channel_frequencies = _config.channel_frequencies(); } WeightsManager::~WeightsManager() { } WeightsManager::WeightsVectorType const& WeightsManager::weights( DelayVectorType const& delays, TimeType epoch) { // First we retrieve new delays if there are any. BOOST_LOG_TRIVIAL(debug) << "Requesting weights for epoch = " << epoch; DelayManager::DelayType const* delays_ptr = thrust::raw_pointer_cast(delays.data()); WeightsType* weights_ptr = thrust::raw_pointer_cast(_weights.data()); FreqType const* frequencies_ptr = thrust::raw_pointer_cast(_channel_frequencies.data()); dim3 grid(_config.cb_nbeams(), _channel_frequencies.size(), 1); dim3 block(32, 32, 1); BOOST_LOG_TRIVIAL(debug) << "Launching weights generation kernel"; hipLaunchKernelGGL(( kernels::generate_weights_k), dim3(grid), dim3(block), 0, _stream , delays_ptr, weights_ptr, frequencies_ptr, _config.cb_nantennas(), _config.cb_nbeams(), _channel_frequencies.size(), epoch, 0.0, 1); CUDA_ERROR_CHECK(hipStreamSynchronize(_stream)); BOOST_LOG_TRIVIAL(debug) << "Weights successfully generated"; return _weights; } } //namespace fbfuse } //namespace meerkat } //namespace psrdada_cpp
6cfa4d37fb30fbb3b55cceabc240da3152ccfc58.cu
#include "psrdada_cpp/meerkat/fbfuse/WeightsManager.cuh" #include "psrdada_cpp/meerkat/fbfuse/PipelineConfig.hpp" #include "psrdada_cpp/meerkat/fbfuse/DelayManager.cuh" #include "psrdada_cpp/cuda_utils.hpp" #include <thrust/device_vector.h> #define TWOPI 6.283185307179586f namespace psrdada_cpp { namespace meerkat { namespace fbfuse { namespace kernels { __global__ void generate_weights_k( float2 const * __restrict__ delay_models, char2 * __restrict__ weights, float const * __restrict__ channel_frequencies, int nantennas, int nbeams, int nchans, float tstart, float tstep, int ntsteps) { //for each loaded delay poly we can produce multiple epochs for one antenna, one beam, all frequencies and both pols //Different blocks should handle different beams (as antennas are on the inner dimension of the output product) //Basics of this kernel: // // gridDim.x is used for beams (there is a loop if you want to limit the grid size) // gridDim.y is used for channels (there is a loop if you want to limit the grid size) // blockDim.x is used for antennas (there is a loop if you want to limit the grid size) // // Time steps are handled in a the inner loop. As antennas are on the inner dimension of // both the input and the output array, all reads and writes should be coalesced. 
const int weights_per_beam = nantennas; const int weights_per_channel = weights_per_beam * nbeams; const int weights_per_time_step = weights_per_channel * nchans; float2 weight; char2 compressed_weight; //This isn't really needed as there will never be more than 64 antennas //However this makes this fucntion more flexible with smaller blocks for (int chan_idx = blockIdx.y; chan_idx < nchans; chan_idx += gridDim.y) { float frequency = channel_frequencies[chan_idx]; int chan_offset = chan_idx * weights_per_channel; // correct for (int beam_idx = blockIdx.x; beam_idx < nbeams; beam_idx += gridDim.x) { int beam_offset = chan_offset + beam_idx * weights_per_beam; // correct for (int antenna_idx = threadIdx.x; antenna_idx < nantennas; antenna_idx+=blockDim.x) { float2 delay_model = delay_models[beam_idx * nantennas + antenna_idx]; // correct int antenna_offset = beam_offset + antenna_idx; for (int time_idx = threadIdx.y; time_idx < ntsteps; time_idx+=blockDim.y) { //Calculates epoch offset float t = tstart + time_idx * tstep; float phase = (t * delay_model.x + delay_model.y) * frequency; //This is possible as the magnitude of the weight is 1 //If we ever have to implement scalar weightings, this //must change. 
__sincosf(TWOPI * phase, &weight.y, &weight.x); compressed_weight.x = (char) __float2int_rn(weight.x * 127.0f); compressed_weight.y = (char) __float2int_rn(weight.y * 127.0f); int output_idx = time_idx * weights_per_time_step + antenna_offset; weights[output_idx] = compressed_weight; } } } } } } //namespace kernels WeightsManager::WeightsManager(PipelineConfig const& config, cudaStream_t stream) : _config(config) , _stream(stream) { std::size_t nbeams = _config.cb_nbeams(); std::size_t nantennas = _config.cb_nantennas(); BOOST_LOG_TRIVIAL(debug) << "Constructing WeightsManager instance to hold weights for " << nbeams << " beams and " << nantennas << " antennas"; _weights.resize(nbeams * nantennas * _config.nchans()); // This should be an implicit copy to the device BOOST_LOG_TRIVIAL(debug) << "Copying channel frequencies to the GPU"; _channel_frequencies = _config.channel_frequencies(); } WeightsManager::~WeightsManager() { } WeightsManager::WeightsVectorType const& WeightsManager::weights( DelayVectorType const& delays, TimeType epoch) { // First we retrieve new delays if there are any. BOOST_LOG_TRIVIAL(debug) << "Requesting weights for epoch = " << epoch; DelayManager::DelayType const* delays_ptr = thrust::raw_pointer_cast(delays.data()); WeightsType* weights_ptr = thrust::raw_pointer_cast(_weights.data()); FreqType const* frequencies_ptr = thrust::raw_pointer_cast(_channel_frequencies.data()); dim3 grid(_config.cb_nbeams(), _channel_frequencies.size(), 1); dim3 block(32, 32, 1); BOOST_LOG_TRIVIAL(debug) << "Launching weights generation kernel"; kernels::generate_weights_k<<< grid, block, 0, _stream >>>(delays_ptr, weights_ptr, frequencies_ptr, _config.cb_nantennas(), _config.cb_nbeams(), _channel_frequencies.size(), epoch, 0.0, 1); CUDA_ERROR_CHECK(cudaStreamSynchronize(_stream)); BOOST_LOG_TRIVIAL(debug) << "Weights successfully generated"; return _weights; } } //namespace fbfuse } //namespace meerkat } //namespace psrdada_cpp
027ff3f7eb3100fe9e90a5afe6b1c9510bddbd08.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <fstream> #include <chrono> #include <hip/hip_runtime.h> #include <iostream> #include <vector> #define BlockSize 32 const int INF = 1000000000; void input(char *inFileName); void output(char *outFileName); void block_FW(int B,char*); int ceil(int a, int b); void cal(char* d,size_t pitch,int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height,hipStream_t stream); void cpu(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height); int n, m; int* d; double io_time = 0; double comp_time = 0; double mem_time = 0; int main(int argc, char* argv[]) { auto io_beg = std::chrono::high_resolution_clock::now(); input(argv[1]); auto io_end = std::chrono::high_resolution_clock::now(); io_time += std::chrono::duration<double>(io_end-io_beg).count(); int B = BlockSize; block_FW(B,argv[2]); io_beg = std::chrono::high_resolution_clock::now(); output(argv[2]); io_end = std::chrono::high_resolution_clock::now(); io_time += std::chrono::duration<double>(io_end-io_beg).count(); std::cout<< comp_time <<" "<<mem_time<<" "<<io_time; delete d; return 0; } void input(char* infile) { FILE* file = fopen(infile, "rb"); fread(&n, sizeof(int), 1, file); fread(&m, sizeof(int), 1, file); int *buf = new int[m*3]; d = new int[n*n]; fread(buf, sizeof(int), 3*m, file); #pragma omp parallel for for (int i = 0; i < n; ++ i) { for (int j = 0; j < n; ++ j) { if (i == j) { d[i*n+j] = 0; } else { d[i*n+j] = INF; } } } #pragma omp parallel for for (int i = 0; i < m; ++ i) { int pair[3]; // fread(pair, sizeof(int), 3, file); for(int j=0;j<3;j++) pair[j]=buf[i*3+j]; d[pair[0]*n+pair[1]] = pair[2]; } fclose(file); delete buf; } void output(char *outFileName) { FILE *outfile = fopen(outFileName, "w"); fwrite(d, sizeof(int), n*n, outfile); fclose(outfile); } int ceil(int a, int b) { return (a + b - 1) / b; } __global__ void kernel_I(char* 
d,size_t pitch,int block_x, int block_y,int n,int B,int r); __global__ void kernel_II(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r); __global__ void kernel_III(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r); inline void moveBlock(char** ptr,int dst, int src,int x,int y,int B,size_t pitch){ for(int k=B*x;k<B*(x+1);k++) hipMemcpyPeer(ptr[dst]+pitch*B*k+sizeof(int)*B*y,dst, ptr[src]+pitch*B*k+sizeof(int)*B*y,src, sizeof(int)*B); } void block_FW(int B, char* outFileName) { int round = ceil(n, B); char *device_d[2]; size_t pitch[2]; // hipMalloc(&device_d,sizeof(int)*n*n); // hipMemcpy(device_d,d,sizeof(int)*n*n,hipMemcpyHostToDevice); auto mem_beg = std::chrono::high_resolution_clock::now(); for(int dev=0;dev<2;dev++){ hipSetDevice(dev); hipDeviceEnablePeerAccess(!dev,0); hipMallocPitch(&device_d[dev],&pitch[dev],sizeof(int)*round*B,round*B); hipMemcpy2DAsync(device_d[dev],pitch[dev], d,sizeof(int)*n, sizeof(int)*n,n,hipMemcpyHostToDevice); } auto mem_end = std::chrono::high_resolution_clock::now(); mem_time += std::chrono::duration<double>(mem_end-mem_beg).count(); for(int dev=0;dev<2;dev++){ hipSetDevice(dev); hipDeviceSynchronize(); } auto comp_beg = std::chrono::high_resolution_clock::now(); for (int r = 0; r <= round; ++r) { dim3 dimBlock(B,B); dim3 dimGrid(1,1); // for(int dev=0;dev<2;dev++){ // hipSetDevice(dev); // hipLaunchKernelGGL(( kernel_I) , dim3(dimGrid),dim3(dimBlock),0,0, device_d[dev],pitch[dev],r,r,n,B,r); // dimGrid = dim3(2,round-1); // hipLaunchKernelGGL(( kernel_II) , dim3(dimGrid),dim3(dimBlock),0,0, device_d[dev],pitch[dev],0,0,n,B,r); // } for(int dev=0;dev<2;dev++){ hipSetDevice(dev); hipDeviceSynchronize(); } hipStream_t streams[2][round-round/2]; for(int i=0;i<round-round/2;i++) for(int j=0;j<2;j++) hipStreamCreate(&streams[j][i]); #pragma omp parallel sections { #pragma omp section { hipSetDevice(0); hipLaunchKernelGGL(( kernel_I) , dim3(dimGrid),dim3(dimBlock),0,0, device_d[0],pitch[0],r,r,n,B,r); 
dimGrid = dim3(2,round-1); hipLaunchKernelGGL(( kernel_II) , dim3(dimGrid),dim3(dimBlock),0,0, device_d[0],pitch[0],0,0,n,B,r); dimGrid = dim3(1,round); for(int i=0;i<round/2;i++){ hipLaunchKernelGGL(( kernel_III), dim3(dimGrid),dim3(dimBlock),0,streams[0][i], device_d[0],pitch[0],i,0,n,B,r); // hipMemcpyPeerAsync(device_d[0]+i*pitch[0]*B,0, // device_d[1]+i*pitch[1]*B,1, // pitch[1]*B,streams[1]); hipMemcpy2DAsync(device_d[0]+i*pitch[0]*B,pitch[0], device_d[1]+i*pitch[1]*B,pitch[1], sizeof(int)*n,B,hipMemcpyDefault,streams[0][i]); } hipDeviceSynchronize(); } #pragma omp section { hipSetDevice(1); hipLaunchKernelGGL(( kernel_I) , dim3(dimGrid),dim3(dimBlock),0,0, device_d[1],pitch[1],r,r,n,B,r); dimGrid = dim3(2,round-1); hipLaunchKernelGGL(( kernel_II) , dim3(dimGrid),dim3(dimBlock),0,0, device_d[1],pitch[1],0,0,n,B,r); dimGrid = dim3(1,round); for(int i=round/2;i<round;i++){ hipLaunchKernelGGL(( kernel_III), dim3(dimGrid),dim3(dimBlock),0,streams[1][i-round/2], device_d[1],pitch[1],i,0,n,B,r); // hipMemcpyPeerAsync(device_d[1]+i*pitch[1]*B,1, // device_d[0]+i*pitch[0]*B,0, // pitch[0]*B,streams[1]); hipMemcpy2DAsync(device_d[1]+i*pitch[1]*B,pitch[1], device_d[0]+i*pitch[0]*B,pitch[0], sizeof(int)*n,B,hipMemcpyDefault,streams[1][i-round/2]); } hipDeviceSynchronize(); } } for(int dev=0;dev<2;dev++){ hipSetDevice(dev); hipDeviceSynchronize(); } } auto comp_end = std::chrono::high_resolution_clock::now(); comp_time += std::chrono::duration<double>(comp_end-comp_beg).count(); mem_beg = std::chrono::high_resolution_clock::now(); hipSetDevice(0); hipMemcpy2DAsync(d,sizeof(int)*n, device_d[0],pitch[0], sizeof(int)*n,round/2*B,hipMemcpyDeviceToHost); hipSetDevice(1); hipMemcpy2DAsync(d+round/2*B*n,sizeof(int)*n, device_d[1]+round/2*B*pitch[1],pitch[1], sizeof(int)*n,n-round/2*B,hipMemcpyDeviceToHost); for(int dev=0;dev<2;dev++){ hipSetDevice(dev); hipDeviceSynchronize(); } mem_end = std::chrono::high_resolution_clock::now(); mem_time += 
std::chrono::duration<double>(mem_end-mem_beg).count(); // hipMemcpy(d,device_d,sizeof(int)*n*n,hipMemcpyDeviceToHost); for(int dev=0;dev<2;dev++) hipFree(device_d[dev]); } __device__ inline int gmin(int a,int b){ return (a>b)*b+(a<=b)*a; } __global__ void kernel_I(char* d,size_t pitch,int block_x, int block_y,int n,int B, int r){ __shared__ int d_i_j[BlockSize][BlockSize+1]; const unsigned int i = block_x*B+threadIdx.x; const unsigned int j = block_y*B+threadIdx.y; // const int idx = threadIdx.y*blockDim.x*threadIdx.x; int* d_i = (int*)(d+pitch*i); unsigned int origin_path = i<n&&j<n? __ldg(&d_i[j]) : INF; d_i_j[threadIdx.x][threadIdx.y] = origin_path; // int* d_k_j = (int*)(d+pitch*k); const unsigned int k_max = gmin((r+1) * B,n); #pragma unroll for (unsigned int k = r * B; k < k_max; ++k) { __syncthreads(); int new_d = d_i_j[threadIdx.x][k-r*B]+d_i_j[k-r*B][threadIdx.y]; if(d_i_j[threadIdx.x][threadIdx.y]>new_d){ d_i_j[threadIdx.x][threadIdx.y]=new_d; } } if(origin_path>d_i_j[threadIdx.x][threadIdx.y]&&i<n&&j<n){ d_i[j]=d_i_j[threadIdx.x][threadIdx.y]; } } __global__ void kernel_III(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r){ __shared__ int d_i_k[BlockSize][BlockSize+1]; __shared__ int d_k_j[BlockSize][BlockSize+1]; int i = (block_x+blockIdx.x)*B+threadIdx.x; int j = (block_y+blockIdx.y)*B+threadIdx.y; // unsigned int i = (block_x+blockIdx.x)*B+threadIdx.x; // unsigned int j = (block_y+blockIdx.y)*B+threadIdx.y; int* d_i = ((int*)(d+pitch*i)); int path = i<n&&j<n? 
__ldg(&d_i[j]) : INF; int origin_path = path; if(r*B+threadIdx.y < n && i < n) d_i_k[threadIdx.x][threadIdx.y] = __ldg(&d_i[r*B+threadIdx.y]); else d_i_k[threadIdx.x][threadIdx.y] = INF; if(r*B+threadIdx.x < n && j < n) d_k_j[threadIdx.x][threadIdx.y] = __ldg(&((int*)(d+pitch*(r*B+threadIdx.x)))[j]); else d_k_j[threadIdx.x][threadIdx.y] = INF; __syncthreads(); // const int k_max = gmin((r+1) * B , n); const unsigned int k_max = gmin((r+1) * B,n); #pragma unroll for (unsigned int k = r * B; k < k_max; ++k) { int new_path = d_i_k[threadIdx.x][k-r*B]+d_k_j[k-r*B][threadIdx.y]; if(path>new_path) path = new_path; } if(origin_path>path&&i<n&&j<n){ d_i[j]=path; } } __global__ void kernel_II(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r){ __shared__ int d_i_k[BlockSize][BlockSize+1]; __shared__ int d_k_j[BlockSize][BlockSize+1]; unsigned int i, j; if(blockIdx.x==0){ i = r*B + threadIdx.x; j = blockIdx.y * B + threadIdx.y ; } else{ i = blockIdx.y * B + threadIdx.x ; j = r*B + threadIdx.y; } // int i = (block_x+blockIdx.x)>=r? // (block_x+blockIdx.x+1)*B+threadIdx.x:(block_x+blockIdx.x)*B+threadIdx.x; // int j = (block_y+blockIdx.y)>=r? // (block_y+blockIdx.y+1)*B+threadIdx.y:(block_y+blockIdx.y)*B+threadIdx.y; // int j = (block_y+blockIdx.y)*B+threadIdx.y; int* d_i = (int*)(d+pitch*i); int path = i<n&&j<n? d_i[j] : INF; int origin_path = path; d_i_k[threadIdx.x][threadIdx.y] = i < n && r*B+threadIdx.y < n ? __ldg(&d_i[r*B+threadIdx.y]) : INF; d_k_j[threadIdx.x][threadIdx.y] = j < n && r*B+threadIdx.x < n ? __ldg(&((int*)(d+pitch*(r*B+threadIdx.x)))[j]) : INF; __syncthreads(); const unsigned int k_max = gmin((r+1) * B , n); #pragma unroll for (unsigned int k = r * B; k < k_max; ++k) { // int* d_k = (int*)(d+pitch*k); int new_path = d_i_k[threadIdx.x][k-r*B]+d_k_j[k-r*B][threadIdx.y]; if(path>new_path) path = new_path; } if(origin_path>path&&i<n&&j<n){ d_i[j]=path; } }
027ff3f7eb3100fe9e90a5afe6b1c9510bddbd08.cu
#include <stdio.h> #include <stdlib.h> #include <fstream> #include <chrono> #include <cuda_runtime.h> #include <iostream> #include <vector> #define BlockSize 32 const int INF = 1000000000; void input(char *inFileName); void output(char *outFileName); void block_FW(int B,char*); int ceil(int a, int b); void cal(char* d,size_t pitch,int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height,cudaStream_t stream); void cpu(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height); int n, m; int* d; double io_time = 0; double comp_time = 0; double mem_time = 0; int main(int argc, char* argv[]) { auto io_beg = std::chrono::high_resolution_clock::now(); input(argv[1]); auto io_end = std::chrono::high_resolution_clock::now(); io_time += std::chrono::duration<double>(io_end-io_beg).count(); int B = BlockSize; block_FW(B,argv[2]); io_beg = std::chrono::high_resolution_clock::now(); output(argv[2]); io_end = std::chrono::high_resolution_clock::now(); io_time += std::chrono::duration<double>(io_end-io_beg).count(); std::cout<< comp_time <<" "<<mem_time<<" "<<io_time; delete d; return 0; } void input(char* infile) { FILE* file = fopen(infile, "rb"); fread(&n, sizeof(int), 1, file); fread(&m, sizeof(int), 1, file); int *buf = new int[m*3]; d = new int[n*n]; fread(buf, sizeof(int), 3*m, file); #pragma omp parallel for for (int i = 0; i < n; ++ i) { for (int j = 0; j < n; ++ j) { if (i == j) { d[i*n+j] = 0; } else { d[i*n+j] = INF; } } } #pragma omp parallel for for (int i = 0; i < m; ++ i) { int pair[3]; // fread(pair, sizeof(int), 3, file); for(int j=0;j<3;j++) pair[j]=buf[i*3+j]; d[pair[0]*n+pair[1]] = pair[2]; } fclose(file); delete buf; } void output(char *outFileName) { FILE *outfile = fopen(outFileName, "w"); fwrite(d, sizeof(int), n*n, outfile); fclose(outfile); } int ceil(int a, int b) { return (a + b - 1) / b; } __global__ void kernel_I(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r); 
__global__ void kernel_II(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r); __global__ void kernel_III(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r); inline void moveBlock(char** ptr,int dst, int src,int x,int y,int B,size_t pitch){ for(int k=B*x;k<B*(x+1);k++) cudaMemcpyPeer(ptr[dst]+pitch*B*k+sizeof(int)*B*y,dst, ptr[src]+pitch*B*k+sizeof(int)*B*y,src, sizeof(int)*B); } void block_FW(int B, char* outFileName) { int round = ceil(n, B); char *device_d[2]; size_t pitch[2]; // cudaMalloc(&device_d,sizeof(int)*n*n); // cudaMemcpy(device_d,d,sizeof(int)*n*n,cudaMemcpyHostToDevice); auto mem_beg = std::chrono::high_resolution_clock::now(); for(int dev=0;dev<2;dev++){ cudaSetDevice(dev); cudaDeviceEnablePeerAccess(!dev,0); cudaMallocPitch(&device_d[dev],&pitch[dev],sizeof(int)*round*B,round*B); cudaMemcpy2DAsync(device_d[dev],pitch[dev], d,sizeof(int)*n, sizeof(int)*n,n,cudaMemcpyHostToDevice); } auto mem_end = std::chrono::high_resolution_clock::now(); mem_time += std::chrono::duration<double>(mem_end-mem_beg).count(); for(int dev=0;dev<2;dev++){ cudaSetDevice(dev); cudaDeviceSynchronize(); } auto comp_beg = std::chrono::high_resolution_clock::now(); for (int r = 0; r <= round; ++r) { dim3 dimBlock(B,B); dim3 dimGrid(1,1); // for(int dev=0;dev<2;dev++){ // cudaSetDevice(dev); // kernel_I <<<dimGrid,dimBlock,0,0>>>(device_d[dev],pitch[dev],r,r,n,B,r); // dimGrid = dim3(2,round-1); // kernel_II <<<dimGrid,dimBlock,0,0>>>(device_d[dev],pitch[dev],0,0,n,B,r); // } for(int dev=0;dev<2;dev++){ cudaSetDevice(dev); cudaDeviceSynchronize(); } cudaStream_t streams[2][round-round/2]; for(int i=0;i<round-round/2;i++) for(int j=0;j<2;j++) cudaStreamCreate(&streams[j][i]); #pragma omp parallel sections { #pragma omp section { cudaSetDevice(0); kernel_I <<<dimGrid,dimBlock,0,0>>>(device_d[0],pitch[0],r,r,n,B,r); dimGrid = dim3(2,round-1); kernel_II <<<dimGrid,dimBlock,0,0>>>(device_d[0],pitch[0],0,0,n,B,r); dimGrid = dim3(1,round); for(int 
i=0;i<round/2;i++){ kernel_III<<<dimGrid,dimBlock,0,streams[0][i]>>>(device_d[0],pitch[0],i,0,n,B,r); // cudaMemcpyPeerAsync(device_d[0]+i*pitch[0]*B,0, // device_d[1]+i*pitch[1]*B,1, // pitch[1]*B,streams[1]); cudaMemcpy2DAsync(device_d[0]+i*pitch[0]*B,pitch[0], device_d[1]+i*pitch[1]*B,pitch[1], sizeof(int)*n,B,cudaMemcpyDefault,streams[0][i]); } cudaDeviceSynchronize(); } #pragma omp section { cudaSetDevice(1); kernel_I <<<dimGrid,dimBlock,0,0>>>(device_d[1],pitch[1],r,r,n,B,r); dimGrid = dim3(2,round-1); kernel_II <<<dimGrid,dimBlock,0,0>>>(device_d[1],pitch[1],0,0,n,B,r); dimGrid = dim3(1,round); for(int i=round/2;i<round;i++){ kernel_III<<<dimGrid,dimBlock,0,streams[1][i-round/2]>>>(device_d[1],pitch[1],i,0,n,B,r); // cudaMemcpyPeerAsync(device_d[1]+i*pitch[1]*B,1, // device_d[0]+i*pitch[0]*B,0, // pitch[0]*B,streams[1]); cudaMemcpy2DAsync(device_d[1]+i*pitch[1]*B,pitch[1], device_d[0]+i*pitch[0]*B,pitch[0], sizeof(int)*n,B,cudaMemcpyDefault,streams[1][i-round/2]); } cudaDeviceSynchronize(); } } for(int dev=0;dev<2;dev++){ cudaSetDevice(dev); cudaDeviceSynchronize(); } } auto comp_end = std::chrono::high_resolution_clock::now(); comp_time += std::chrono::duration<double>(comp_end-comp_beg).count(); mem_beg = std::chrono::high_resolution_clock::now(); cudaSetDevice(0); cudaMemcpy2DAsync(d,sizeof(int)*n, device_d[0],pitch[0], sizeof(int)*n,round/2*B,cudaMemcpyDeviceToHost); cudaSetDevice(1); cudaMemcpy2DAsync(d+round/2*B*n,sizeof(int)*n, device_d[1]+round/2*B*pitch[1],pitch[1], sizeof(int)*n,n-round/2*B,cudaMemcpyDeviceToHost); for(int dev=0;dev<2;dev++){ cudaSetDevice(dev); cudaDeviceSynchronize(); } mem_end = std::chrono::high_resolution_clock::now(); mem_time += std::chrono::duration<double>(mem_end-mem_beg).count(); // cudaMemcpy(d,device_d,sizeof(int)*n*n,cudaMemcpyDeviceToHost); for(int dev=0;dev<2;dev++) cudaFree(device_d[dev]); } __device__ inline int gmin(int a,int b){ return (a>b)*b+(a<=b)*a; } __global__ void kernel_I(char* d,size_t pitch,int 
block_x, int block_y,int n,int B, int r){ __shared__ int d_i_j[BlockSize][BlockSize+1]; const unsigned int i = block_x*B+threadIdx.x; const unsigned int j = block_y*B+threadIdx.y; // const int idx = threadIdx.y*blockDim.x*threadIdx.x; int* d_i = (int*)(d+pitch*i); unsigned int origin_path = i<n&&j<n? __ldg(&d_i[j]) : INF; d_i_j[threadIdx.x][threadIdx.y] = origin_path; // int* d_k_j = (int*)(d+pitch*k); const unsigned int k_max = gmin((r+1) * B,n); #pragma unroll for (unsigned int k = r * B; k < k_max; ++k) { __syncthreads(); int new_d = d_i_j[threadIdx.x][k-r*B]+d_i_j[k-r*B][threadIdx.y]; if(d_i_j[threadIdx.x][threadIdx.y]>new_d){ d_i_j[threadIdx.x][threadIdx.y]=new_d; } } if(origin_path>d_i_j[threadIdx.x][threadIdx.y]&&i<n&&j<n){ d_i[j]=d_i_j[threadIdx.x][threadIdx.y]; } } __global__ void kernel_III(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r){ __shared__ int d_i_k[BlockSize][BlockSize+1]; __shared__ int d_k_j[BlockSize][BlockSize+1]; int i = (block_x+blockIdx.x)*B+threadIdx.x; int j = (block_y+blockIdx.y)*B+threadIdx.y; // unsigned int i = (block_x+blockIdx.x)*B+threadIdx.x; // unsigned int j = (block_y+blockIdx.y)*B+threadIdx.y; int* d_i = ((int*)(d+pitch*i)); int path = i<n&&j<n? 
__ldg(&d_i[j]) : INF; int origin_path = path; if(r*B+threadIdx.y < n && i < n) d_i_k[threadIdx.x][threadIdx.y] = __ldg(&d_i[r*B+threadIdx.y]); else d_i_k[threadIdx.x][threadIdx.y] = INF; if(r*B+threadIdx.x < n && j < n) d_k_j[threadIdx.x][threadIdx.y] = __ldg(&((int*)(d+pitch*(r*B+threadIdx.x)))[j]); else d_k_j[threadIdx.x][threadIdx.y] = INF; __syncthreads(); // const int k_max = gmin((r+1) * B , n); const unsigned int k_max = gmin((r+1) * B,n); #pragma unroll for (unsigned int k = r * B; k < k_max; ++k) { int new_path = d_i_k[threadIdx.x][k-r*B]+d_k_j[k-r*B][threadIdx.y]; if(path>new_path) path = new_path; } if(origin_path>path&&i<n&&j<n){ d_i[j]=path; } } __global__ void kernel_II(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r){ __shared__ int d_i_k[BlockSize][BlockSize+1]; __shared__ int d_k_j[BlockSize][BlockSize+1]; unsigned int i, j; if(blockIdx.x==0){ i = r*B + threadIdx.x; j = blockIdx.y * B + threadIdx.y ; } else{ i = blockIdx.y * B + threadIdx.x ; j = r*B + threadIdx.y; } // int i = (block_x+blockIdx.x)>=r? // (block_x+blockIdx.x+1)*B+threadIdx.x:(block_x+blockIdx.x)*B+threadIdx.x; // int j = (block_y+blockIdx.y)>=r? // (block_y+blockIdx.y+1)*B+threadIdx.y:(block_y+blockIdx.y)*B+threadIdx.y; // int j = (block_y+blockIdx.y)*B+threadIdx.y; int* d_i = (int*)(d+pitch*i); int path = i<n&&j<n? d_i[j] : INF; int origin_path = path; d_i_k[threadIdx.x][threadIdx.y] = i < n && r*B+threadIdx.y < n ? __ldg(&d_i[r*B+threadIdx.y]) : INF; d_k_j[threadIdx.x][threadIdx.y] = j < n && r*B+threadIdx.x < n ? __ldg(&((int*)(d+pitch*(r*B+threadIdx.x)))[j]) : INF; __syncthreads(); const unsigned int k_max = gmin((r+1) * B , n); #pragma unroll for (unsigned int k = r * B; k < k_max; ++k) { // int* d_k = (int*)(d+pitch*k); int new_path = d_i_k[threadIdx.x][k-r*B]+d_k_j[k-r*B][threadIdx.y]; if(path>new_path) path = new_path; } if(origin_path>path&&i<n&&j<n){ d_i[j]=path; } }
54511f40e749ae34d8b41254a557c9f4fcb4a234.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "transpose.cuh" __global__ void transposeDiagonal(float *odata, float *idata, int width, int height, int nreps) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int blockIdx_x, blockIdx_y; // diagonal reordering if (width == height) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x; } else { int bid = blockIdx.x + gridDim.x*blockIdx.y; blockIdx_y = bid%gridDim.y; blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x; } int xIndex = blockIdx_x*TILE_DIM + threadIdx.x; int yIndex = blockIdx_y*TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx_y*TILE_DIM + threadIdx.x; yIndex = blockIdx_x*TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int r=0; r < nreps; r++) { for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } } void transposeImage(float *dev_in, float *dev_out, rect2d image) { dim3 grid(image.width/TILE_DIM, image.height/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); hipLaunchKernelGGL(( transposeDiagonal), dim3(grid),dim3(threads), 0, 0, dev_out, dev_in, image.width, image.height, 1); } cudaImage createTransposedImage(cudaImage input) { cudaImage flipped = createImage(input.height, input.width); rect2d inputDim = {input.width, input.height}; transposeImage(getData(input), getData(flipped), inputDim); return flipped; }
54511f40e749ae34d8b41254a557c9f4fcb4a234.cu
#include "transpose.cuh" __global__ void transposeDiagonal(float *odata, float *idata, int width, int height, int nreps) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int blockIdx_x, blockIdx_y; // diagonal reordering if (width == height) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x; } else { int bid = blockIdx.x + gridDim.x*blockIdx.y; blockIdx_y = bid%gridDim.y; blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x; } int xIndex = blockIdx_x*TILE_DIM + threadIdx.x; int yIndex = blockIdx_y*TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx_y*TILE_DIM + threadIdx.x; yIndex = blockIdx_x*TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int r=0; r < nreps; r++) { for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } } void transposeImage(float *dev_in, float *dev_out, rect2d image) { dim3 grid(image.width/TILE_DIM, image.height/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); transposeDiagonal<<<grid,threads>>>(dev_out, dev_in, image.width, image.height, 1); } cudaImage createTransposedImage(cudaImage input) { cudaImage flipped = createImage(input.height, input.width); rect2d inputDim = {input.width, input.height}; transposeImage(getData(input), getData(flipped), inputDim); return flipped; }
768570b5e56f6e51d55bd7853d1f884794dc7451.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "rocblas.h" #include "Utilities.cuh" int main() { const unsigned int N = 3; const unsigned int Nmatrices = 1; hipblasHandle_t handle; cublasSafeCall(hipblasCreate(&handle)); // --- Matrices to be inverted (only one in this example) float *h_A = new float[N*N*Nmatrices]; h_A[0] = 4.f; h_A[1] = 3.f; h_A[2] = 8.f; h_A[3] = 9.f; h_A[4] = 5.f; h_A[5] = 1.f; h_A[6] = 2.f; h_A[7] = 7.f; h_A[8] = 6.f; // --- Allocate device matrices float *d_A; gpuErrchk(hipMalloc((void**)&d_A, N*N*Nmatrices*sizeof(float))); // --- Move the matrix to be inverted from host to device gpuErrchk(hipMemcpy(d_A,h_A,N*N*Nmatrices*sizeof(float),hipMemcpyHostToDevice)); // --- Creating the array of pointers needed as input to the batched getrf float **h_inout_pointers = (float **)malloc(Nmatrices*sizeof(float *)); for (int i=0; i<Nmatrices; i++) h_inout_pointers[i]=(float *)((char*)d_A+i*((size_t)N*N)*sizeof(float)); float **d_inout_pointers; gpuErrchk(hipMalloc((void**)&d_inout_pointers, Nmatrices*sizeof(float *))); gpuErrchk(hipMemcpy(d_inout_pointers,h_inout_pointers,Nmatrices*sizeof(float *),hipMemcpyHostToDevice)); free(h_inout_pointers); int *d_PivotArray; gpuErrchk(hipMalloc((void**)&d_PivotArray, N*Nmatrices*sizeof(int))); int *d_InfoArray; gpuErrchk(hipMalloc((void**)&d_InfoArray, Nmatrices*sizeof(int))); int *h_PivotArray = (int *)malloc(N*Nmatrices*sizeof(int)); int *h_InfoArray = (int *)malloc( Nmatrices*sizeof(int)); cublasSafeCall(hipblasSgetrfBatched(handle, N, d_inout_pointers, N, d_PivotArray, d_InfoArray, Nmatrices)); //cublasSafeCall(hipblasSgetrfBatched(handle, N, d_inout_pointers, N, NULL, d_InfoArray, Nmatrices)); gpuErrchk(hipMemcpy(h_InfoArray,d_InfoArray,Nmatrices*sizeof(int),hipMemcpyDeviceToHost)); for (int i = 0; i < Nmatrices; i++) if (h_InfoArray[i] != 0) { fprintf(stderr, "Factorization of matrix %d Failed: Matrix 
may be singular\n", i); hipDeviceReset(); exit(EXIT_FAILURE); } gpuErrchk(hipMemcpy(h_A,d_A,N*N*sizeof(float),hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_PivotArray,d_PivotArray,N*Nmatrices*sizeof(int),hipMemcpyDeviceToHost)); for (int i=0; i<N*N; i++) printf("A[%i]=%f\n", i, h_A[i]); printf("\n\n"); for (int i=0; i<N; i++) printf("P[%i]=%i\n", i, h_PivotArray[i]); return 0; }
768570b5e56f6e51d55bd7853d1f884794dc7451.cu
#include <stdio.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cublas_v2.h" #include "Utilities.cuh" int main() { const unsigned int N = 3; const unsigned int Nmatrices = 1; cublasHandle_t handle; cublasSafeCall(cublasCreate(&handle)); // --- Matrices to be inverted (only one in this example) float *h_A = new float[N*N*Nmatrices]; h_A[0] = 4.f; h_A[1] = 3.f; h_A[2] = 8.f; h_A[3] = 9.f; h_A[4] = 5.f; h_A[5] = 1.f; h_A[6] = 2.f; h_A[7] = 7.f; h_A[8] = 6.f; // --- Allocate device matrices float *d_A; gpuErrchk(cudaMalloc((void**)&d_A, N*N*Nmatrices*sizeof(float))); // --- Move the matrix to be inverted from host to device gpuErrchk(cudaMemcpy(d_A,h_A,N*N*Nmatrices*sizeof(float),cudaMemcpyHostToDevice)); // --- Creating the array of pointers needed as input to the batched getrf float **h_inout_pointers = (float **)malloc(Nmatrices*sizeof(float *)); for (int i=0; i<Nmatrices; i++) h_inout_pointers[i]=(float *)((char*)d_A+i*((size_t)N*N)*sizeof(float)); float **d_inout_pointers; gpuErrchk(cudaMalloc((void**)&d_inout_pointers, Nmatrices*sizeof(float *))); gpuErrchk(cudaMemcpy(d_inout_pointers,h_inout_pointers,Nmatrices*sizeof(float *),cudaMemcpyHostToDevice)); free(h_inout_pointers); int *d_PivotArray; gpuErrchk(cudaMalloc((void**)&d_PivotArray, N*Nmatrices*sizeof(int))); int *d_InfoArray; gpuErrchk(cudaMalloc((void**)&d_InfoArray, Nmatrices*sizeof(int))); int *h_PivotArray = (int *)malloc(N*Nmatrices*sizeof(int)); int *h_InfoArray = (int *)malloc( Nmatrices*sizeof(int)); cublasSafeCall(cublasSgetrfBatched(handle, N, d_inout_pointers, N, d_PivotArray, d_InfoArray, Nmatrices)); //cublasSafeCall(cublasSgetrfBatched(handle, N, d_inout_pointers, N, NULL, d_InfoArray, Nmatrices)); gpuErrchk(cudaMemcpy(h_InfoArray,d_InfoArray,Nmatrices*sizeof(int),cudaMemcpyDeviceToHost)); for (int i = 0; i < Nmatrices; i++) if (h_InfoArray[i] != 0) { fprintf(stderr, "Factorization of matrix %d Failed: Matrix may be singular\n", i); cudaDeviceReset(); 
exit(EXIT_FAILURE); } gpuErrchk(cudaMemcpy(h_A,d_A,N*N*sizeof(float),cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_PivotArray,d_PivotArray,N*Nmatrices*sizeof(int),cudaMemcpyDeviceToHost)); for (int i=0; i<N*N; i++) printf("A[%i]=%f\n", i, h_A[i]); printf("\n\n"); for (int i=0; i<N; i++) printf("P[%i]=%i\n", i, h_PivotArray[i]); return 0; }
b653e580cdc28520e57b128c2fb191edcb8e0e5a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace nd4j { //////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execConvertToHalf(void *dx, Nd4jLong n, half *dz) { auto x = reinterpret_cast<T *>(dx); int tid = threadIdx.x + blockIdx.x * blockDim.x; for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) dz[i] = __float2half(static_cast<T>(x[i])); } //////////////////////////////////////////////////////////////////////// template<typename T> __host__ void convertToHalfGeneric(dim3 &launchDims, hipStream_t *stream, void *dx, Nd4jLong n, half *dz) { hipLaunchKernelGGL(( execConvertToHalf<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, dx, n, dz); nd4j::DebugHelper::checkErrorCode(stream, "convertToHalfs(...) failed"); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT convertToHalfGeneric, (dim3 & launchDims, hipStream_t * stream, void * dx, Nd4jLong n, half * dz), LIBND4J_TYPES); }
b653e580cdc28520e57b128c2fb191edcb8e0e5a.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace nd4j { //////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execConvertToHalf(void *dx, Nd4jLong n, half *dz) { auto x = reinterpret_cast<T *>(dx); int tid = threadIdx.x + blockIdx.x * blockDim.x; for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) dz[i] = __float2half(static_cast<T>(x[i])); } //////////////////////////////////////////////////////////////////////// template<typename T> __host__ void convertToHalfGeneric(dim3 &launchDims, cudaStream_t *stream, void *dx, Nd4jLong n, half *dz) { execConvertToHalf<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dx, n, dz); nd4j::DebugHelper::checkErrorCode(stream, "convertToHalfs(...) failed"); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT convertToHalfGeneric, (dim3 & launchDims, cudaStream_t * stream, void * dx, Nd4jLong n, half * dz), LIBND4J_TYPES); }
ba6882c1e2d1e687d829ddff8eff3d8895a8ec5b.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright (c) 2021 by Contributors * \file nccl_api.cc * \brief Implementation of wrapper around NCCL routines. */ #ifdef DGL_USE_NCCL #include "nccl_api.h" #include <dgl/array.h> #include <dgl/aten/array_ops.h> #include <dgl/runtime/container.h> #include <dgl/runtime/device_api.h> #include <dgl/packed_func_ext.h> #include <dgl/runtime/registry.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <cmath> #include <sstream> #include <iomanip> #include <utility> #include <vector> #include <memory> #include <string> #include <limits> #include "cuda_common.h" #include "../../runtime/workspace.h" #include "../../partition/ndarray_partition.h" #include "../../array/cuda/dgl_cub.cuh" #include "../../array/cuda/array_index_select.cuh" #define NCCL_CALL(func) \ { \ ncclResult_t result = func; \ if (result != ncclSuccess) { \ LOG(FATAL) \ << "NCCLError: " #func " failed with error: " << result; \ } \ } namespace dgl { using namespace partition; namespace runtime { namespace cuda { namespace { enum class AllToAllMode : int { REMAINDER = 0 }; template<typename T> ncclDataType_t NCCLType(); template<> ncclDataType_t NCCLType<int32_t>() { return ncclInt32; } template<> ncclDataType_t NCCLType<int64_t>() { return ncclInt64; } template<> ncclDataType_t NCCLType<__half>() { return ncclHalf; } template<> ncclDataType_t NCCLType<float>() { return ncclFloat32; } template<> ncclDataType_t NCCLType<double>() { return ncclFloat64; } template<typename IdType, typename DType> __global__ void _DualPermKernel( const IdType * const in_idx, const DType * const in_value, const IdType * const perm, const int64_t num_in, const int64_t num_feat, IdType * const out_idx, DType * const out_value) { // set index permutation const int64_t tidx = blockDim.x*static_cast<int64_t>(blockIdx.x)+threadIdx.x; if (tidx < num_in) { const IdType perm_idx = perm[tidx]; assert(perm_idx < num_in); out_idx[tidx] = in_idx[perm_idx]; } if 
(num_feat > 1) { for (int d = 0; d < blockDim.x; ++d) { const int64_t bidx = blockDim.x*static_cast<int64_t>(blockIdx.x) + d; if (bidx < num_in) { const IdType perm_idx = perm[bidx]; for (int64_t f = threadIdx.x; f < num_feat; f+=blockDim.x) { out_value[bidx*num_feat+f] = in_value[perm_idx*num_feat+f]; } } } } else { if (tidx < num_in) { const IdType perm_idx = perm[tidx]; out_value[tidx] = in_value[perm_idx]; } } } template <typename DType, typename IdType> __global__ void _InversePermKernel( const DType* const array, const int64_t num_feat, int64_t length, const IdType* const perm, DType* const out) { int64_t in_row = blockIdx.x*blockDim.y+threadIdx.y; const int64_t stride = blockDim.y*gridDim.x; while (in_row < length) { int64_t col = threadIdx.x; const int64_t out_row = perm[in_row]; while (col < num_feat) { out[out_row*num_feat+col] = array[in_row*num_feat+col]; col += blockDim.x; } in_row += stride; } } template<typename IdType, typename DType> std::pair<IdArray, NDArray> SparsePush( NCCLCommunicatorRef comm, IdArray in_idx, NDArray in_value, NDArrayPartitionRef part) { const auto& ctx = in_idx->ctx; CHECK_EQ(ctx, in_value->ctx) << "Indices and values must be on the same " "device"; auto device = DeviceAPI::Get(ctx); // TODO(dlasalle): Get the stream from the device context. hipStream_t stream = 0; CHECK_LE(in_idx->ndim, 1) << "The tensor of sending indices must be of " "dimension one (or empty)."; const int64_t num_in = in_idx->ndim > 0 ? in_idx->shape[0] : 0; CHECK_EQ(num_in, in_value->ndim > 0 ? in_value->shape[0] : 0) << "Leading dimension of indices (" << num_in << ") must match " "leading dimension of values (" << (in_value->ndim > 0 ? 
in_value->shape[0] : 0) << ")."; int64_t num_feat = 1; for (int d = 1; d < in_value->ndim; ++d) { num_feat *= in_value->shape[d]; } const int64_t comm_size = comm->size(); if (comm_size == 1) { // nothing to do, just return original arrays return std::pair<IdArray, NDArray>(in_idx, in_value); } std::pair<IdArray, NDArray> part_perm = part->GeneratePermutation(in_idx); const IdType * const perm = static_cast<const IdType*>(part_perm.first->data); const int64_t * const send_sum = static_cast<const int64_t*>(part_perm.second->data); Workspace<IdType> send_idx(device, ctx, num_in); Workspace<DType> send_value(device, ctx, num_in*num_feat); // permute the indices and values if (num_in > 0) { const dim3 block(256); const dim3 grid((num_in+block.x-1)/block.x); hipLaunchKernelGGL(( _DualPermKernel), dim3(grid), dim3(block), 0, stream, static_cast<const IdType*>(in_idx->data), static_cast<const DType*>(in_value->data), perm, num_in, num_feat, send_idx.get(), send_value.get()); CUDA_CALL(hipGetLastError()); } // compute the prefix sum of the send values Workspace<int64_t> send_prefix(device, ctx, comm_size+1); { size_t prefix_workspace_size; CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(nullptr, prefix_workspace_size, send_sum, send_prefix.get(), comm_size+1, stream)); Workspace<void> prefix_workspace(device, ctx, prefix_workspace_size); CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(prefix_workspace.get(), prefix_workspace_size, send_sum, send_prefix.get(), comm_size+1, stream)); } std::vector<int64_t> send_prefix_host(comm_size+1); device->CopyDataFromTo( send_prefix.get(), 0, send_prefix_host.data(), 0, send_prefix_host.size()*sizeof(*send_prefix.get()), ctx, DGLContext{kDLCPU, 0}, DGLType{kDLInt, sizeof(*send_prefix.get())*8, 1}, stream); send_prefix.free(); CHECK_EQ(send_prefix_host.back(), num_in) << "Internal Error: " "send_prefix_host.back() = " << send_prefix_host.back() << ", and num_in = " << num_in; // communicate the amount to send Workspace<int64_t> recv_sum(device, 
ctx, comm_size+1); comm->AllToAll(send_sum, recv_sum.get(), 1, stream); hipEvent_t d2h; hipEventCreate(&d2h); // compute the prefix sum of the recv values Workspace<int64_t> recv_prefix(device, ctx, comm_size+1); { size_t prefix_workspace_size; CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(nullptr, prefix_workspace_size, recv_sum.get(), recv_prefix.get(), comm_size+1)); Workspace<void> prefix_workspace(device, ctx, prefix_workspace_size); CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(prefix_workspace.get(), prefix_workspace_size, recv_sum.get(), recv_prefix.get(), comm_size+1)); } recv_sum.free(); // finally copy the prefixsum sum down to the host std::vector<int64_t> recv_prefix_host(comm_size+1); device->CopyDataFromTo( recv_prefix.get(), 0, recv_prefix_host.data(), 0, recv_prefix_host.size()*sizeof(*recv_prefix.get()), ctx, DGLContext{kDLCPU, 0}, DGLType{kDLInt, sizeof(*recv_prefix.get())*8, 1}, stream); recv_prefix.free(); // use an event to track when copying is done hipEventRecord(d2h, stream); // allocate output space hipEventSynchronize(d2h); hipEventDestroy(d2h); IdArray recv_idx = aten::NewIdArray( recv_prefix_host.back(), ctx, sizeof(IdType)*8); std::vector<int64_t> value_shape(in_value->ndim, 0); value_shape[0] = recv_prefix_host.back(); for (int d = 1; d < in_value->ndim; ++d) { value_shape[d] = in_value->shape[d]; } NDArray recv_value = NDArray::Empty(value_shape, in_value->dtype, ctx); // send data comm->SparseAllToAll( send_idx.get(), send_value.get(), num_feat, send_prefix_host.data(), static_cast<IdType*>(recv_idx->data), static_cast<DType*>(recv_value->data), recv_prefix_host.data(), stream); return std::pair<IdArray, NDArray>(recv_idx, recv_value); } template<typename IdType, typename DType> NDArray SparsePull( NCCLCommunicatorRef comm, IdArray req_idx, NDArray local_tensor, NDArrayPartitionRef part) { const auto& ctx = req_idx->ctx; CHECK_EQ(ctx, local_tensor->ctx) << "The request indices and set of local " "values must be on the same device"; auto 
device = DeviceAPI::Get(ctx); hipStream_t stream = CUDAThreadEntry::ThreadLocal()->stream; CHECK_LE(req_idx->ndim, 1) << "The tensor of requested indices must be of " "dimension one (or empty)."; const int64_t num_in = req_idx->ndim > 0 ? req_idx->shape[0] : 0; int64_t num_feat = 1; for (int d = 1; d < local_tensor->ndim; ++d) { num_feat *= local_tensor->shape[d]; } const int64_t comm_size = comm->size(); if (comm_size == 1) { // Just return index selection from current local_tensor return aten::IndexSelect(local_tensor, req_idx); } // First we need to send our requests to other processors. This means // re-ordering our index array to be contiguous among processors, and // counting the number of indices we are sending each processor. For now, // we assume a poorly partitioned graph, and that there exists the // possibility that each processor could request data from this one. // the buffer for us to re-order our requests in Workspace<IdType> send_idx(device, ctx, num_in); std::pair<IdArray, NDArray> part_perm = part->GeneratePermutation(req_idx); const IdType * const perm = static_cast<const IdType*>(part_perm.first->data); const int64_t * const send_sum = static_cast<const int64_t*>(part_perm.second->data); // permute requests if (num_in > 0) { const dim3 block(256); const dim3 grid((num_in+block.x-1)/block.x); hipLaunchKernelGGL(( aten::impl::IndexSelectSingleKernel), dim3(grid), dim3(block), 0, stream, static_cast<const IdType*>(req_idx->data), perm, num_in, req_idx->shape[0], send_idx.get()); CUDA_CALL(hipGetLastError()); } // compute the prefix sum of the indexes this process is requesting Workspace<int64_t> request_prefix(device, ctx, comm_size+1); { size_t prefix_workspace_size; CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(nullptr, prefix_workspace_size, send_sum, request_prefix.get(), comm_size+1, stream)); Workspace<void> prefix_workspace(device, ctx, prefix_workspace_size); CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(prefix_workspace.get(), 
prefix_workspace_size, send_sum, request_prefix.get(), comm_size+1, stream)); } hipEvent_t d2h; hipEventCreate(&d2h); std::vector<int64_t> request_prefix_host(comm_size+1); device->CopyDataFromTo( request_prefix.get(), 0, request_prefix_host.data(), 0, request_prefix_host.size()*sizeof(*request_prefix.get()), ctx, DGLContext{kDLCPU, 0}, DGLType{kDLInt, sizeof(*request_prefix.get())*8, 1}, stream); request_prefix.free(); CHECK_EQ(request_prefix_host.back(), num_in) << "Internal Error: " "request_prefix_host.back() = " << request_prefix_host.back() << ", num_in = " << num_in; // communicate the amount requested Workspace<int64_t> recv_sum(device, ctx, comm_size+1); comm->AllToAll(send_sum, recv_sum.get(), 1, stream); // compute the prefix sum of the requested indexes Workspace<int64_t> response_prefix(device, ctx, comm_size+1); { size_t prefix_workspace_size; CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(nullptr, prefix_workspace_size, recv_sum.get(), response_prefix.get(), comm_size+1, stream)); Workspace<void> prefix_workspace(device, ctx, prefix_workspace_size); CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(prefix_workspace.get(), prefix_workspace_size, recv_sum.get(), response_prefix.get(), comm_size+1, stream)); } recv_sum.free(); // finally copy the prefixsum sum down to the host std::vector<int64_t> response_prefix_host(comm_size+1); device->CopyDataFromTo( response_prefix.get(), 0, response_prefix_host.data(), 0, response_prefix_host.size()*sizeof(*response_prefix.get()), ctx, DGLContext{kDLCPU, 0}, DGLType{kDLInt, sizeof(*response_prefix.get())*8, 1}, stream); response_prefix.free(); // use an event to track when copying is done hipEventRecord(d2h, stream); // allocate output space hipEventSynchronize(d2h); hipEventDestroy(d2h); // gather requested indexes IdArray recv_idx = aten::NewIdArray( response_prefix_host.back(), ctx, sizeof(IdType)*8); comm->AllToAllV( send_idx.get(), request_prefix_host.data(), static_cast<IdType*>(recv_idx->data), 
response_prefix_host.data(), stream); send_idx.free(); // convert requested indices to local indices depending on partition if (response_prefix_host.back() > 0) { recv_idx = part->MapToLocal(recv_idx); } // and then index select them into place Workspace<DType> filled_response_value(device, ctx, response_prefix_host.back()*num_feat); if (request_prefix_host.back() > 0) { dim3 block(256, 1); while (block.x >= 2*num_feat) { block.x /= 2; block.y *= 2; } const dim3 grid((request_prefix_host.back()+block.y-1)/block.y); hipLaunchKernelGGL(( aten::impl::IndexSelectMultiKernel), dim3(grid), dim3(block), 0, stream, static_cast<const DType*>(local_tensor->data), num_feat, static_cast<IdType*>(recv_idx->data), response_prefix_host.back(), local_tensor->shape[0], filled_response_value.get()); CUDA_CALL(hipGetLastError()); } // we will collect recieved values in this array std::vector<int64_t> value_shape(local_tensor->ndim, 0); value_shape[0] = request_prefix_host.back(); for (int d = 1; d < local_tensor->ndim; ++d) { value_shape[d] = local_tensor->shape[d]; } Workspace<DType> filled_request_value(device, ctx, request_prefix_host.back()*num_feat); // multiply the prefixes by the number of features being sent for (auto& v : request_prefix_host) { v *= num_feat; } for (auto& v : response_prefix_host) { v *= num_feat; } // send the values comm->AllToAllV( filled_response_value.get(), response_prefix_host.data(), filled_request_value.get(), request_prefix_host.data(), stream); filled_response_value.free(); // finally, we need to permute the values back into the requested order NDArray result = NDArray::Empty(value_shape, local_tensor->dtype, ctx); if (num_in > 0) { dim3 block(256, 1); while (block.x >= 2*num_feat) { block.x /= 2; block.y *= 2; } const dim3 grid((num_in+block.y-1)/block.y); hipLaunchKernelGGL(( _InversePermKernel), dim3(grid), dim3(block), 0, stream, filled_request_value.get(), num_feat, num_in, perm, static_cast<DType*>(result->data)); 
CUDA_CALL(hipGetLastError()); } return result; } } // namespace /* NCCLUniqueId **************************************************************/ NCCLUniqueId::NCCLUniqueId() : id_() { // this ID is unique to the process, not to each call of this function NCCL_CALL(ncclGetUniqueId(&id_)); } ncclUniqueId NCCLUniqueId::Get() const { return id_; } std::string NCCLUniqueId::ToString() const { std::ostringstream oss; oss << std::hex; for (size_t b = 0; b < NCCL_UNIQUE_ID_BYTES; ++b) { const int num = static_cast<uint8_t>(id_.internal[b]); oss << std::setw(2) << std::setfill('0') << num; } std::string result = oss.str(); CHECK_EQ(result.length(), NCCL_UNIQUE_ID_BYTES*2) << "Invalid NCCL ID format: '" << result << "'"; return result; } void NCCLUniqueId::FromString( const std::string& str) { // must be exactly 256 hex characters CHECK_EQ(str.length(), NCCL_UNIQUE_ID_BYTES * 2) << "Invalid NCCL ID format: '" << str << "'"; for (size_t b = 0; b < NCCL_UNIQUE_ID_BYTES; ++b) { id_.internal[b] = std::strtol(str.substr(b*2, 2).c_str(), nullptr, 16); } } /* NCCLCommunicator **********************************************************/ NCCLCommunicator::NCCLCommunicator( const int size, const int rank, ncclUniqueId id) : comm_(), size_(size), rank_(rank) { CHECK_LT(rank, size) << "The rank (" << rank << ") must be smaller than " "the size of the communicator (" << size << ")."; CHECK_GE(rank, 0) << "The rank (" << rank << ") must be greater than or " "equal to 0."; NCCL_CALL(ncclCommInitRank(&comm_, size_, id, rank_)); } NCCLCommunicator::~NCCLCommunicator() { ncclCommDestroy(comm_); } ncclComm_t NCCLCommunicator::Get() { return comm_; } template<typename DType> void NCCLCommunicator::AllToAllV( const DType * const send, const int64_t * const send_prefix, DType * const recv, const int64_t * const recv_prefix, hipStream_t stream) { const ncclDataType_t type = NCCLType<DType>(); NCCL_CALL(ncclGroupStart()); for (int r = 0; r < size_; ++r) { const int64_t send_size = 
send_prefix[r+1]-send_prefix[r]; if (send_size > 0) { NCCL_CALL(ncclSend(send+send_prefix[r], send_size, type, r, comm_, stream)); } const int64_t recv_size = recv_prefix[r+1]-recv_prefix[r]; if (recv_size > 0) { NCCL_CALL(ncclRecv(recv+recv_prefix[r], recv_size, type, r, comm_, stream)); } } NCCL_CALL(ncclGroupEnd()); } template void NCCLCommunicator::AllToAllV<int32_t>( const int32_t * const send, const int64_t * send_prefix, int32_t * const recv, const int64_t * recv_prefix, hipStream_t stream); template void NCCLCommunicator::AllToAllV<int64_t>( const int64_t * const send, const int64_t * send_prefix, int64_t * const recv, const int64_t * recv_prefix, hipStream_t stream); template void NCCLCommunicator::AllToAllV<float>( const float * const send, const int64_t * send_prefix, float * const recv, const int64_t * recv_prefix, hipStream_t stream); template void NCCLCommunicator::AllToAllV<__half>( const __half * const send, const int64_t * send_prefix, __half * const recv, const int64_t * recv_prefix, hipStream_t stream); template<typename IdType> void NCCLCommunicator::AllToAll( const IdType * const send, IdType * const recv, const int64_t count, hipStream_t stream) { const ncclDataType_t type = NCCLType<IdType>(); ncclGroupStart(); for (int r = 0; r < size_; ++r) { ncclSend(send+(r*count), count, type, r, comm_, stream); ncclRecv(recv+(r*count), count, type, r, comm_, stream); } ncclGroupEnd(); } template void NCCLCommunicator::AllToAll<int32_t>( const int32_t * const send, int32_t * const recv, const int64_t count, hipStream_t stream); template void NCCLCommunicator::AllToAll<int64_t>( const int64_t * const send, int64_t * const recv, const int64_t count, hipStream_t stream); template<typename IdType, typename DType> void NCCLCommunicator::SparseAllToAll( const IdType * const send_idx, const DType * const send_value, const int64_t num_feat, const int64_t * const send_prefix, IdType * const recv_idx, DType * const recv_value, const int64_t * const recv_prefix, 
hipStream_t stream) { const ncclDataType_t idx_type = NCCLType<IdType>(); const ncclDataType_t value_type = NCCLType<DType>(); ncclGroupStart(); for (int r = 0; r < size_; ++r) { const int64_t send_size = send_prefix[r+1]-send_prefix[r]; if (send_size > 0) { ncclSend(send_idx+send_prefix[r], send_size, idx_type, r, comm_, stream); ncclSend(send_value+send_prefix[r]*num_feat, send_size*num_feat, value_type, r, comm_, stream); } const int64_t recv_size = recv_prefix[r+1]-recv_prefix[r]; if (recv_size > 0) { ncclRecv(recv_idx+recv_prefix[r], recv_size, idx_type, r, comm_, stream); ncclRecv(recv_value+recv_prefix[r]*num_feat, recv_size*num_feat, value_type, r, comm_, stream); } } ncclGroupEnd(); } template void NCCLCommunicator::SparseAllToAll<int32_t, __half>( const int32_t * const send_idx, const __half * const send_value, const int64_t num_feat, const int64_t * const send_prefix, int32_t * const recv_idx, __half * const recv_value, const int64_t * const recv_prefix, hipStream_t stream); template void NCCLCommunicator::SparseAllToAll<int64_t, __half>( const int64_t * const send_idx, const __half * const send_value, const int64_t num_feat, const int64_t * const send_prefix, int64_t * const recv_idx, __half * const recv_value, const int64_t * const recv_prefix, hipStream_t stream); int NCCLCommunicator::size() const { return size_; } int NCCLCommunicator::rank() const { return rank_; } /* CAPI **********************************************************************/ DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLGetUniqueId") .set_body([] (DGLArgs args, DGLRetValue* rv) { *rv = NCCLUniqueIdRef(std::make_shared<NCCLUniqueId>()); }); DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLUniqueIdToString") .set_body([] (DGLArgs args, DGLRetValue* rv) { NCCLUniqueIdRef idObj = args[0]; *rv = idObj->ToString(); }); DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLUniqueIdFromString") .set_body([] (DGLArgs args, DGLRetValue* rv) { const std::string str = args[0]; NCCLUniqueIdRef 
ref(std::make_shared<NCCLUniqueId>()); ref->FromString(str); *rv = ref; }); DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLCreateComm") .set_body([] (DGLArgs args, DGLRetValue* rv) { const int size = args[0]; const int rank = args[1]; NCCLUniqueIdRef idObj = args[2]; *rv = NCCLCommunicatorRef(std::make_shared<NCCLCommunicator>(size, rank, idObj->Get())); }); DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLSparseAllToAllPush") .set_body([] (DGLArgs args, DGLRetValue* rv) { NCCLCommunicatorRef comm = args[0]; IdArray in_idx = args[1]; NDArray in_values = args[2]; NDArrayPartitionRef part = args[3]; List<ObjectRef> ret; ATEN_ID_TYPE_SWITCH(in_idx->dtype, IdType, { ATEN_DTYPE_SWITCH(in_values->dtype, DType, "values", { auto result = SparsePush<IdType, DType>(comm, in_idx, in_values, part); ret.push_back(Value(MakeValue(result.first))); ret.push_back(Value(MakeValue(result.second))); }); }); *rv = ret; }); DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLSparseAllToAllPull") .set_body([] (DGLArgs args, DGLRetValue* rv) { NCCLCommunicatorRef comm = args[0]; // the indexes this process is requesting from others IdArray req_idx = args[1]; // the tensor this process has to fulfill other requests NDArray tensor = args[2]; NDArrayPartitionRef part = args[3]; ATEN_ID_TYPE_SWITCH(req_idx->dtype, IdType, { ATEN_DTYPE_SWITCH(tensor->dtype, DType, "values", { *rv = SparsePull<IdType, DType>(comm, req_idx, tensor, part); }); }); }); } // namespace cuda } // namespace runtime } // namespace dgl #endif
ba6882c1e2d1e687d829ddff8eff3d8895a8ec5b.cu
/*! * Copyright (c) 2021 by Contributors * \file nccl_api.cc * \brief Implementation of wrapper around NCCL routines. */ #ifdef DGL_USE_NCCL #include "nccl_api.h" #include <dgl/array.h> #include <dgl/aten/array_ops.h> #include <dgl/runtime/container.h> #include <dgl/runtime/device_api.h> #include <dgl/packed_func_ext.h> #include <dgl/runtime/registry.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cmath> #include <sstream> #include <iomanip> #include <utility> #include <vector> #include <memory> #include <string> #include <limits> #include "cuda_common.h" #include "../../runtime/workspace.h" #include "../../partition/ndarray_partition.h" #include "../../array/cuda/dgl_cub.cuh" #include "../../array/cuda/array_index_select.cuh" #define NCCL_CALL(func) \ { \ ncclResult_t result = func; \ if (result != ncclSuccess) { \ LOG(FATAL) \ << "NCCLError: " #func " failed with error: " << result; \ } \ } namespace dgl { using namespace partition; namespace runtime { namespace cuda { namespace { enum class AllToAllMode : int { REMAINDER = 0 }; template<typename T> ncclDataType_t NCCLType(); template<> ncclDataType_t NCCLType<int32_t>() { return ncclInt32; } template<> ncclDataType_t NCCLType<int64_t>() { return ncclInt64; } template<> ncclDataType_t NCCLType<__half>() { return ncclHalf; } template<> ncclDataType_t NCCLType<float>() { return ncclFloat32; } template<> ncclDataType_t NCCLType<double>() { return ncclFloat64; } template<typename IdType, typename DType> __global__ void _DualPermKernel( const IdType * const in_idx, const DType * const in_value, const IdType * const perm, const int64_t num_in, const int64_t num_feat, IdType * const out_idx, DType * const out_value) { // set index permutation const int64_t tidx = blockDim.x*static_cast<int64_t>(blockIdx.x)+threadIdx.x; if (tidx < num_in) { const IdType perm_idx = perm[tidx]; assert(perm_idx < num_in); out_idx[tidx] = in_idx[perm_idx]; } if (num_feat > 1) { for (int d = 0; d < blockDim.x; ++d) { const 
int64_t bidx = blockDim.x*static_cast<int64_t>(blockIdx.x) + d; if (bidx < num_in) { const IdType perm_idx = perm[bidx]; for (int64_t f = threadIdx.x; f < num_feat; f+=blockDim.x) { out_value[bidx*num_feat+f] = in_value[perm_idx*num_feat+f]; } } } } else { if (tidx < num_in) { const IdType perm_idx = perm[tidx]; out_value[tidx] = in_value[perm_idx]; } } } template <typename DType, typename IdType> __global__ void _InversePermKernel( const DType* const array, const int64_t num_feat, int64_t length, const IdType* const perm, DType* const out) { int64_t in_row = blockIdx.x*blockDim.y+threadIdx.y; const int64_t stride = blockDim.y*gridDim.x; while (in_row < length) { int64_t col = threadIdx.x; const int64_t out_row = perm[in_row]; while (col < num_feat) { out[out_row*num_feat+col] = array[in_row*num_feat+col]; col += blockDim.x; } in_row += stride; } } template<typename IdType, typename DType> std::pair<IdArray, NDArray> SparsePush( NCCLCommunicatorRef comm, IdArray in_idx, NDArray in_value, NDArrayPartitionRef part) { const auto& ctx = in_idx->ctx; CHECK_EQ(ctx, in_value->ctx) << "Indices and values must be on the same " "device"; auto device = DeviceAPI::Get(ctx); // TODO(dlasalle): Get the stream from the device context. cudaStream_t stream = 0; CHECK_LE(in_idx->ndim, 1) << "The tensor of sending indices must be of " "dimension one (or empty)."; const int64_t num_in = in_idx->ndim > 0 ? in_idx->shape[0] : 0; CHECK_EQ(num_in, in_value->ndim > 0 ? in_value->shape[0] : 0) << "Leading dimension of indices (" << num_in << ") must match " "leading dimension of values (" << (in_value->ndim > 0 ? 
in_value->shape[0] : 0) << ")."; int64_t num_feat = 1; for (int d = 1; d < in_value->ndim; ++d) { num_feat *= in_value->shape[d]; } const int64_t comm_size = comm->size(); if (comm_size == 1) { // nothing to do, just return original arrays return std::pair<IdArray, NDArray>(in_idx, in_value); } std::pair<IdArray, NDArray> part_perm = part->GeneratePermutation(in_idx); const IdType * const perm = static_cast<const IdType*>(part_perm.first->data); const int64_t * const send_sum = static_cast<const int64_t*>(part_perm.second->data); Workspace<IdType> send_idx(device, ctx, num_in); Workspace<DType> send_value(device, ctx, num_in*num_feat); // permute the indices and values if (num_in > 0) { const dim3 block(256); const dim3 grid((num_in+block.x-1)/block.x); _DualPermKernel<<<grid, block, 0, stream>>>( static_cast<const IdType*>(in_idx->data), static_cast<const DType*>(in_value->data), perm, num_in, num_feat, send_idx.get(), send_value.get()); CUDA_CALL(cudaGetLastError()); } // compute the prefix sum of the send values Workspace<int64_t> send_prefix(device, ctx, comm_size+1); { size_t prefix_workspace_size; CUDA_CALL(cub::DeviceScan::ExclusiveSum(nullptr, prefix_workspace_size, send_sum, send_prefix.get(), comm_size+1, stream)); Workspace<void> prefix_workspace(device, ctx, prefix_workspace_size); CUDA_CALL(cub::DeviceScan::ExclusiveSum(prefix_workspace.get(), prefix_workspace_size, send_sum, send_prefix.get(), comm_size+1, stream)); } std::vector<int64_t> send_prefix_host(comm_size+1); device->CopyDataFromTo( send_prefix.get(), 0, send_prefix_host.data(), 0, send_prefix_host.size()*sizeof(*send_prefix.get()), ctx, DGLContext{kDLCPU, 0}, DGLType{kDLInt, sizeof(*send_prefix.get())*8, 1}, stream); send_prefix.free(); CHECK_EQ(send_prefix_host.back(), num_in) << "Internal Error: " "send_prefix_host.back() = " << send_prefix_host.back() << ", and num_in = " << num_in; // communicate the amount to send Workspace<int64_t> recv_sum(device, ctx, comm_size+1); 
comm->AllToAll(send_sum, recv_sum.get(), 1, stream); cudaEvent_t d2h; cudaEventCreate(&d2h); // compute the prefix sum of the recv values Workspace<int64_t> recv_prefix(device, ctx, comm_size+1); { size_t prefix_workspace_size; CUDA_CALL(cub::DeviceScan::ExclusiveSum(nullptr, prefix_workspace_size, recv_sum.get(), recv_prefix.get(), comm_size+1)); Workspace<void> prefix_workspace(device, ctx, prefix_workspace_size); CUDA_CALL(cub::DeviceScan::ExclusiveSum(prefix_workspace.get(), prefix_workspace_size, recv_sum.get(), recv_prefix.get(), comm_size+1)); } recv_sum.free(); // finally copy the prefixsum sum down to the host std::vector<int64_t> recv_prefix_host(comm_size+1); device->CopyDataFromTo( recv_prefix.get(), 0, recv_prefix_host.data(), 0, recv_prefix_host.size()*sizeof(*recv_prefix.get()), ctx, DGLContext{kDLCPU, 0}, DGLType{kDLInt, sizeof(*recv_prefix.get())*8, 1}, stream); recv_prefix.free(); // use an event to track when copying is done cudaEventRecord(d2h, stream); // allocate output space cudaEventSynchronize(d2h); cudaEventDestroy(d2h); IdArray recv_idx = aten::NewIdArray( recv_prefix_host.back(), ctx, sizeof(IdType)*8); std::vector<int64_t> value_shape(in_value->ndim, 0); value_shape[0] = recv_prefix_host.back(); for (int d = 1; d < in_value->ndim; ++d) { value_shape[d] = in_value->shape[d]; } NDArray recv_value = NDArray::Empty(value_shape, in_value->dtype, ctx); // send data comm->SparseAllToAll( send_idx.get(), send_value.get(), num_feat, send_prefix_host.data(), static_cast<IdType*>(recv_idx->data), static_cast<DType*>(recv_value->data), recv_prefix_host.data(), stream); return std::pair<IdArray, NDArray>(recv_idx, recv_value); } template<typename IdType, typename DType> NDArray SparsePull( NCCLCommunicatorRef comm, IdArray req_idx, NDArray local_tensor, NDArrayPartitionRef part) { const auto& ctx = req_idx->ctx; CHECK_EQ(ctx, local_tensor->ctx) << "The request indices and set of local " "values must be on the same device"; auto device = 
DeviceAPI::Get(ctx); cudaStream_t stream = CUDAThreadEntry::ThreadLocal()->stream; CHECK_LE(req_idx->ndim, 1) << "The tensor of requested indices must be of " "dimension one (or empty)."; const int64_t num_in = req_idx->ndim > 0 ? req_idx->shape[0] : 0; int64_t num_feat = 1; for (int d = 1; d < local_tensor->ndim; ++d) { num_feat *= local_tensor->shape[d]; } const int64_t comm_size = comm->size(); if (comm_size == 1) { // Just return index selection from current local_tensor return aten::IndexSelect(local_tensor, req_idx); } // First we need to send our requests to other processors. This means // re-ordering our index array to be contiguous among processors, and // counting the number of indices we are sending each processor. For now, // we assume a poorly partitioned graph, and that there exists the // possibility that each processor could request data from this one. // the buffer for us to re-order our requests in Workspace<IdType> send_idx(device, ctx, num_in); std::pair<IdArray, NDArray> part_perm = part->GeneratePermutation(req_idx); const IdType * const perm = static_cast<const IdType*>(part_perm.first->data); const int64_t * const send_sum = static_cast<const int64_t*>(part_perm.second->data); // permute requests if (num_in > 0) { const dim3 block(256); const dim3 grid((num_in+block.x-1)/block.x); aten::impl::IndexSelectSingleKernel<<<grid, block, 0, stream>>>( static_cast<const IdType*>(req_idx->data), perm, num_in, req_idx->shape[0], send_idx.get()); CUDA_CALL(cudaGetLastError()); } // compute the prefix sum of the indexes this process is requesting Workspace<int64_t> request_prefix(device, ctx, comm_size+1); { size_t prefix_workspace_size; CUDA_CALL(cub::DeviceScan::ExclusiveSum(nullptr, prefix_workspace_size, send_sum, request_prefix.get(), comm_size+1, stream)); Workspace<void> prefix_workspace(device, ctx, prefix_workspace_size); CUDA_CALL(cub::DeviceScan::ExclusiveSum(prefix_workspace.get(), prefix_workspace_size, send_sum, request_prefix.get(), 
comm_size+1, stream)); } cudaEvent_t d2h; cudaEventCreate(&d2h); std::vector<int64_t> request_prefix_host(comm_size+1); device->CopyDataFromTo( request_prefix.get(), 0, request_prefix_host.data(), 0, request_prefix_host.size()*sizeof(*request_prefix.get()), ctx, DGLContext{kDLCPU, 0}, DGLType{kDLInt, sizeof(*request_prefix.get())*8, 1}, stream); request_prefix.free(); CHECK_EQ(request_prefix_host.back(), num_in) << "Internal Error: " "request_prefix_host.back() = " << request_prefix_host.back() << ", num_in = " << num_in; // communicate the amount requested Workspace<int64_t> recv_sum(device, ctx, comm_size+1); comm->AllToAll(send_sum, recv_sum.get(), 1, stream); // compute the prefix sum of the requested indexes Workspace<int64_t> response_prefix(device, ctx, comm_size+1); { size_t prefix_workspace_size; CUDA_CALL(cub::DeviceScan::ExclusiveSum(nullptr, prefix_workspace_size, recv_sum.get(), response_prefix.get(), comm_size+1, stream)); Workspace<void> prefix_workspace(device, ctx, prefix_workspace_size); CUDA_CALL(cub::DeviceScan::ExclusiveSum(prefix_workspace.get(), prefix_workspace_size, recv_sum.get(), response_prefix.get(), comm_size+1, stream)); } recv_sum.free(); // finally copy the prefixsum sum down to the host std::vector<int64_t> response_prefix_host(comm_size+1); device->CopyDataFromTo( response_prefix.get(), 0, response_prefix_host.data(), 0, response_prefix_host.size()*sizeof(*response_prefix.get()), ctx, DGLContext{kDLCPU, 0}, DGLType{kDLInt, sizeof(*response_prefix.get())*8, 1}, stream); response_prefix.free(); // use an event to track when copying is done cudaEventRecord(d2h, stream); // allocate output space cudaEventSynchronize(d2h); cudaEventDestroy(d2h); // gather requested indexes IdArray recv_idx = aten::NewIdArray( response_prefix_host.back(), ctx, sizeof(IdType)*8); comm->AllToAllV( send_idx.get(), request_prefix_host.data(), static_cast<IdType*>(recv_idx->data), response_prefix_host.data(), stream); send_idx.free(); // convert requested 
indices to local indices depending on partition if (response_prefix_host.back() > 0) { recv_idx = part->MapToLocal(recv_idx); } // and then index select them into place Workspace<DType> filled_response_value(device, ctx, response_prefix_host.back()*num_feat); if (request_prefix_host.back() > 0) { dim3 block(256, 1); while (block.x >= 2*num_feat) { block.x /= 2; block.y *= 2; } const dim3 grid((request_prefix_host.back()+block.y-1)/block.y); aten::impl::IndexSelectMultiKernel<<<grid, block, 0, stream>>>( static_cast<const DType*>(local_tensor->data), num_feat, static_cast<IdType*>(recv_idx->data), response_prefix_host.back(), local_tensor->shape[0], filled_response_value.get()); CUDA_CALL(cudaGetLastError()); } // we will collect recieved values in this array std::vector<int64_t> value_shape(local_tensor->ndim, 0); value_shape[0] = request_prefix_host.back(); for (int d = 1; d < local_tensor->ndim; ++d) { value_shape[d] = local_tensor->shape[d]; } Workspace<DType> filled_request_value(device, ctx, request_prefix_host.back()*num_feat); // multiply the prefixes by the number of features being sent for (auto& v : request_prefix_host) { v *= num_feat; } for (auto& v : response_prefix_host) { v *= num_feat; } // send the values comm->AllToAllV( filled_response_value.get(), response_prefix_host.data(), filled_request_value.get(), request_prefix_host.data(), stream); filled_response_value.free(); // finally, we need to permute the values back into the requested order NDArray result = NDArray::Empty(value_shape, local_tensor->dtype, ctx); if (num_in > 0) { dim3 block(256, 1); while (block.x >= 2*num_feat) { block.x /= 2; block.y *= 2; } const dim3 grid((num_in+block.y-1)/block.y); _InversePermKernel<<<grid, block, 0, stream>>>( filled_request_value.get(), num_feat, num_in, perm, static_cast<DType*>(result->data)); CUDA_CALL(cudaGetLastError()); } return result; } } // namespace /* NCCLUniqueId **************************************************************/ 
NCCLUniqueId::NCCLUniqueId() : id_() { // this ID is unique to the process, not to each call of this function NCCL_CALL(ncclGetUniqueId(&id_)); } ncclUniqueId NCCLUniqueId::Get() const { return id_; } std::string NCCLUniqueId::ToString() const { std::ostringstream oss; oss << std::hex; for (size_t b = 0; b < NCCL_UNIQUE_ID_BYTES; ++b) { const int num = static_cast<uint8_t>(id_.internal[b]); oss << std::setw(2) << std::setfill('0') << num; } std::string result = oss.str(); CHECK_EQ(result.length(), NCCL_UNIQUE_ID_BYTES*2) << "Invalid NCCL ID format: '" << result << "'"; return result; } void NCCLUniqueId::FromString( const std::string& str) { // must be exactly 256 hex characters CHECK_EQ(str.length(), NCCL_UNIQUE_ID_BYTES * 2) << "Invalid NCCL ID format: '" << str << "'"; for (size_t b = 0; b < NCCL_UNIQUE_ID_BYTES; ++b) { id_.internal[b] = std::strtol(str.substr(b*2, 2).c_str(), nullptr, 16); } } /* NCCLCommunicator **********************************************************/ NCCLCommunicator::NCCLCommunicator( const int size, const int rank, ncclUniqueId id) : comm_(), size_(size), rank_(rank) { CHECK_LT(rank, size) << "The rank (" << rank << ") must be smaller than " "the size of the communicator (" << size << ")."; CHECK_GE(rank, 0) << "The rank (" << rank << ") must be greater than or " "equal to 0."; NCCL_CALL(ncclCommInitRank(&comm_, size_, id, rank_)); } NCCLCommunicator::~NCCLCommunicator() { ncclCommDestroy(comm_); } ncclComm_t NCCLCommunicator::Get() { return comm_; } template<typename DType> void NCCLCommunicator::AllToAllV( const DType * const send, const int64_t * const send_prefix, DType * const recv, const int64_t * const recv_prefix, cudaStream_t stream) { const ncclDataType_t type = NCCLType<DType>(); NCCL_CALL(ncclGroupStart()); for (int r = 0; r < size_; ++r) { const int64_t send_size = send_prefix[r+1]-send_prefix[r]; if (send_size > 0) { NCCL_CALL(ncclSend(send+send_prefix[r], send_size, type, r, comm_, stream)); } const int64_t recv_size = 
recv_prefix[r+1]-recv_prefix[r]; if (recv_size > 0) { NCCL_CALL(ncclRecv(recv+recv_prefix[r], recv_size, type, r, comm_, stream)); } } NCCL_CALL(ncclGroupEnd()); } template void NCCLCommunicator::AllToAllV<int32_t>( const int32_t * const send, const int64_t * send_prefix, int32_t * const recv, const int64_t * recv_prefix, cudaStream_t stream); template void NCCLCommunicator::AllToAllV<int64_t>( const int64_t * const send, const int64_t * send_prefix, int64_t * const recv, const int64_t * recv_prefix, cudaStream_t stream); template void NCCLCommunicator::AllToAllV<float>( const float * const send, const int64_t * send_prefix, float * const recv, const int64_t * recv_prefix, cudaStream_t stream); template void NCCLCommunicator::AllToAllV<__half>( const __half * const send, const int64_t * send_prefix, __half * const recv, const int64_t * recv_prefix, cudaStream_t stream); template<typename IdType> void NCCLCommunicator::AllToAll( const IdType * const send, IdType * const recv, const int64_t count, cudaStream_t stream) { const ncclDataType_t type = NCCLType<IdType>(); ncclGroupStart(); for (int r = 0; r < size_; ++r) { ncclSend(send+(r*count), count, type, r, comm_, stream); ncclRecv(recv+(r*count), count, type, r, comm_, stream); } ncclGroupEnd(); } template void NCCLCommunicator::AllToAll<int32_t>( const int32_t * const send, int32_t * const recv, const int64_t count, cudaStream_t stream); template void NCCLCommunicator::AllToAll<int64_t>( const int64_t * const send, int64_t * const recv, const int64_t count, cudaStream_t stream); template<typename IdType, typename DType> void NCCLCommunicator::SparseAllToAll( const IdType * const send_idx, const DType * const send_value, const int64_t num_feat, const int64_t * const send_prefix, IdType * const recv_idx, DType * const recv_value, const int64_t * const recv_prefix, cudaStream_t stream) { const ncclDataType_t idx_type = NCCLType<IdType>(); const ncclDataType_t value_type = NCCLType<DType>(); ncclGroupStart(); for (int 
r = 0; r < size_; ++r) { const int64_t send_size = send_prefix[r+1]-send_prefix[r]; if (send_size > 0) { ncclSend(send_idx+send_prefix[r], send_size, idx_type, r, comm_, stream); ncclSend(send_value+send_prefix[r]*num_feat, send_size*num_feat, value_type, r, comm_, stream); } const int64_t recv_size = recv_prefix[r+1]-recv_prefix[r]; if (recv_size > 0) { ncclRecv(recv_idx+recv_prefix[r], recv_size, idx_type, r, comm_, stream); ncclRecv(recv_value+recv_prefix[r]*num_feat, recv_size*num_feat, value_type, r, comm_, stream); } } ncclGroupEnd(); } template void NCCLCommunicator::SparseAllToAll<int32_t, __half>( const int32_t * const send_idx, const __half * const send_value, const int64_t num_feat, const int64_t * const send_prefix, int32_t * const recv_idx, __half * const recv_value, const int64_t * const recv_prefix, cudaStream_t stream); template void NCCLCommunicator::SparseAllToAll<int64_t, __half>( const int64_t * const send_idx, const __half * const send_value, const int64_t num_feat, const int64_t * const send_prefix, int64_t * const recv_idx, __half * const recv_value, const int64_t * const recv_prefix, cudaStream_t stream); int NCCLCommunicator::size() const { return size_; } int NCCLCommunicator::rank() const { return rank_; } /* CAPI **********************************************************************/ DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLGetUniqueId") .set_body([] (DGLArgs args, DGLRetValue* rv) { *rv = NCCLUniqueIdRef(std::make_shared<NCCLUniqueId>()); }); DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLUniqueIdToString") .set_body([] (DGLArgs args, DGLRetValue* rv) { NCCLUniqueIdRef idObj = args[0]; *rv = idObj->ToString(); }); DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLUniqueIdFromString") .set_body([] (DGLArgs args, DGLRetValue* rv) { const std::string str = args[0]; NCCLUniqueIdRef ref(std::make_shared<NCCLUniqueId>()); ref->FromString(str); *rv = ref; }); DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLCreateComm") .set_body([] (DGLArgs args, 
DGLRetValue* rv) { const int size = args[0]; const int rank = args[1]; NCCLUniqueIdRef idObj = args[2]; *rv = NCCLCommunicatorRef(std::make_shared<NCCLCommunicator>(size, rank, idObj->Get())); }); DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLSparseAllToAllPush") .set_body([] (DGLArgs args, DGLRetValue* rv) { NCCLCommunicatorRef comm = args[0]; IdArray in_idx = args[1]; NDArray in_values = args[2]; NDArrayPartitionRef part = args[3]; List<ObjectRef> ret; ATEN_ID_TYPE_SWITCH(in_idx->dtype, IdType, { ATEN_DTYPE_SWITCH(in_values->dtype, DType, "values", { auto result = SparsePush<IdType, DType>(comm, in_idx, in_values, part); ret.push_back(Value(MakeValue(result.first))); ret.push_back(Value(MakeValue(result.second))); }); }); *rv = ret; }); DGL_REGISTER_GLOBAL("cuda.nccl._CAPI_DGLNCCLSparseAllToAllPull") .set_body([] (DGLArgs args, DGLRetValue* rv) { NCCLCommunicatorRef comm = args[0]; // the indexes this process is requesting from others IdArray req_idx = args[1]; // the tensor this process has to fulfill other requests NDArray tensor = args[2]; NDArrayPartitionRef part = args[3]; ATEN_ID_TYPE_SWITCH(req_idx->dtype, IdType, { ATEN_DTYPE_SWITCH(tensor->dtype, DType, "values", { *rv = SparsePull<IdType, DType>(comm, req_idx, tensor, part); }); }); }); } // namespace cuda } // namespace runtime } // namespace dgl #endif
a0bb088528c86f0407bfb4f6151d69cb3f263b0f.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "cuda/modulated_deform_im2col_cuda.cuh" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // #include <THH/THH.h> // #include <THH/THHAtomics.cuh> // #include <THH/THHDeviceUtils.cuh> // extern THCState *state; // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu at::Tensor modulated_deform_conv_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int group, const int deformable_group, const int im2col_step) { // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); const int im2col_step_ = ::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_) AT_ASSERTM((channels % group == 0) && (channels_out % group == 0), "channels(%d) and channels_out(%d) must 
divide group(%d)", channels, channels_out, group) // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == (channels_kernel * group), "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto output = at::empty({batch * height_out * width_out, channels_out}, input.options()); // prepare group weight and bias auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w}); auto bias_g = bias.view({group, channels_out/group}); // define alias for easy use const int batch_n = im2col_step_; const int per_input_size = channels * height * width; const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3); const int per_mask_size = mask.size(1) * mask.size(2) * mask.size(3); auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out}); for (int n = 0; n < batch/im2col_step_; ++n) { auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options()); AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_forward_cuda", ([&] { modulated_deformable_im2col_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data<scalar_t>() + n * im2col_step_ * per_input_size, offset.data<scalar_t>() + n * im2col_step_ * per_offset_size, mask.data<scalar_t>() + n * im2col_step_ * per_mask_size, batch_n, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, 
dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); })); auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out}); auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group}); for (int g = 0; g < group; ++g) { auto columns_gm = columns_g.select(0, g).t(); auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t(); auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm); output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group}); } } output = output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous(); return output; } std::vector<at::Tensor> modulated_deform_conv_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int group, const int deformable_group, const int im2col_step) { AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); 
const int batch_ = grad_output.size(0); const int channels_out_ = grad_output.size(1); const int height_out_ = grad_output.size(2); const int width_out_ = grad_output.size(3); const int im2col_step_ = ::min(im2col_step, batch); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_) AT_ASSERTM((channels % group == 0) && (channels_out % group == 0), "channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group) AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == (channels_kernel * group), "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; AT_ASSERTM(batch == batch_, "Input shape and grad_out batch wont match: (%d vs %d).", batch, batch_); AT_ASSERTM(channels_out == channels_out_, "Input shape and grad_out channels_out wont match: (%d vs %d).", channels_out, channels_out_); AT_ASSERTM(height_out == height_out_ && width_out == width_out_, "Input shape and grad_out shape wont match: (%d x %d vs %d x %d).", height_out, height_out_, width_out, width_out_); auto ones = at::ones({batch * height_out * width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, batch * 1 * height_out * width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); // prepare group weight and bias auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w}); auto grad_weight_g = grad_weight.view({group, channels_out/group, 
channels_kernel, kernel_h, kernel_w}); auto grad_bias_g = grad_bias.view({group, channels_out/group}); const int batch_n = im2col_step_; const int per_input_size = channels * height * width; const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3); const int per_mask_size = mask.size(1) * mask.size(2) * mask.size(3); auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, height_out, width_out}); for (int n = 0; n < batch/im2col_step_; ++n) { auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, height_out, width_out}); auto ones = at::ones({batch_n * height_out * width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * 1 * height_out * width_out}, input.options()); auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out}); for (int g = 0; g < group; ++g) { auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out}); auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t(); columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm); } AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_backward_cuda", ([&] { modulated_deformable_col2im_coord_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data<scalar_t>(), input.data<scalar_t>() + n * im2col_step_ * per_input_size, offset.data<scalar_t>() + n * im2col_step_ * per_offset_size, mask.data<scalar_t>() + n * im2col_step_ * per_mask_size, batch_n, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset.data<scalar_t>() + n * im2col_step_ * per_offset_size, grad_mask.data<scalar_t>() + n * im2col_step_ * per_mask_size); // gradient w.r.t. 
input data modulated_deformable_col2im_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data<scalar_t>(), offset.data<scalar_t>() + n * im2col_step_ * per_offset_size, mask.data<scalar_t>() + n * im2col_step_ * per_mask_size, batch_n, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input.data<scalar_t>() + n * im2col_step_ * per_input_size); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data<scalar_t>() + n * im2col_step_ * per_input_size, offset.data<scalar_t>() + n * im2col_step_ * per_offset_size, mask.data<scalar_t>() + n * im2col_step_ * per_mask_size, batch_n, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); })); // auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out}); // grad_weight = at::mm(grad_output_m, columns.t()).view_as(weight); // grad_bias = at::mv(grad_output_m, ones); // auto grad_output_g = grad_output.view({batch, group, channels_out/group, height_out, width_out}); // auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch * height_out * width_out}); for (int g = 0; g < group; ++g) { auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out}); auto columns_gm = columns_g.select(0, g).t(); auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}); auto grad_bias_gm = grad_bias_g.select(0, g); grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g)); grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, 
grad_output_gm, ones); } } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
a0bb088528c86f0407bfb4f6151d69cb3f263b0f.cu
#include <vector>
#include "cuda/modulated_deform_im2col_cuda.cuh"

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>

// #include <THC/THC.h>
// #include <THC/THCAtomics.cuh>
// #include <THC/THCDeviceUtils.cuh>

// extern THCState *state;

// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu

// Forward pass of modulated deformable convolution (DCNv2).
// Preconditions (checked below): input and weight are contiguous CUDA
// tensors, batch % im2col_step == 0, channels and channels_out divide group,
// and the kernel_h/kernel_w arguments match the weight tensor's kernel dims.
// The batch is processed in chunks of im2col_step images: each chunk is
// unfolded into `columns` by modulated_deformable_im2col_cuda and multiplied
// by the (grouped) weight with at::addmm.
at::Tensor
modulated_deform_conv_cuda_forward(const at::Tensor &input,
                                   const at::Tensor &weight,
                                   const at::Tensor &bias,
                                   const at::Tensor &offset,
                                   const at::Tensor &mask,
                                   const int kernel_h,
                                   const int kernel_w,
                                   const int stride_h,
                                   const int stride_w,
                                   const int pad_h,
                                   const int pad_w,
                                   const int dilation_h,
                                   const int dilation_w,
                                   const int group,
                                   const int deformable_group,
                                   const int im2col_step)
{
    // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
    AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
    AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
    AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
    AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
    AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
    AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
    AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");

    const int batch = input.size(0);
    const int channels = input.size(1);
    const int height = input.size(2);
    const int width = input.size(3);

    const int channels_out = weight.size(0);
    const int channels_kernel = weight.size(1);
    const int kernel_h_ = weight.size(2);
    const int kernel_w_ = weight.size(3);

    const int im2col_step_ = std::min(batch, im2col_step);

    AT_ASSERTM(batch % im2col_step_ == 0,
               "batch(%d) must divide im2col_step(%d)", batch, im2col_step_)
    AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
               "channels(%d) and channels_out(%d) must divide group(%d)",
               channels, channels_out, group)

    // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
    // printf("Channels: %d %d\n", channels, channels_kernel);
    // printf("Channels: %d %d\n", channels_out, channels_kernel);

    // BUGFIX: the format arguments previously were (kernel_h_, kernel_w,
    // kernel_h_, kernel_w_), which printed a garbled mismatch message; they
    // now report the requested kernel vs. the weight tensor's kernel.
    AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape wont match: (%d x %d vs %d x %d).",
               kernel_h, kernel_w, kernel_h_, kernel_w_);

    AT_ASSERTM(channels == (channels_kernel * group),
               "Input shape and kernel channels wont match: (%d vs %d).",
               channels, channels_kernel * group);

    const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
    const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;

    // accumulated here in (pixel-major, channel-minor) layout and permuted to
    // NCHW at the end
    auto output = at::empty({batch * height_out * width_out, channels_out}, input.options());

    // prepare group weight and bias
    auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
    auto bias_g = bias.view({group, channels_out/group});

    // define alias for easy use
    const int batch_n = im2col_step_;
    const int per_input_size = channels * height * width;
    const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
    const int per_mask_size = mask.size(1) * mask.size(2) * mask.size(3);
    auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out});
    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        // unfold this chunk of the batch into column form
        auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options());
        AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_forward_cuda", ([&] {
            modulated_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
                                             input.data<scalar_t>() + n * im2col_step_ * per_input_size,
                                             offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
                                             mask.data<scalar_t>() + n * im2col_step_ * per_mask_size,
                                             batch_n, channels, height, width,
                                             height_out, width_out, kernel_h, kernel_w,
                                             pad_h, pad_w, stride_h, stride_w,
                                             dilation_h, dilation_w, deformable_group,
                                             columns.data<scalar_t>());
        }));

        // per-group GEMM: output = columns^T x weight^T (+ bias)
        auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
        auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group});
        for (int g = 0; g < group; ++g)
        {
            auto columns_gm = columns_g.select(0, g).t();
            auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
            auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm);
            output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group});
        }
    }

    output = output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous();

    return output;
}

// Backward pass of modulated deformable convolution.
// Returns {grad_input, grad_offset, grad_mask, grad_weight, grad_bias}.
// Mirrors the forward pass chunking: for each im2col_step-sized chunk the
// column gradient is reconstructed from grad_output, scattered back to
// input/offset/mask gradients, and the chunk is re-unfolded to accumulate the
// weight/bias gradients.
std::vector<at::Tensor>
modulated_deform_conv_cuda_backward(const at::Tensor &input,
                                    const at::Tensor &weight,
                                    const at::Tensor &bias,
                                    const at::Tensor &offset,
                                    const at::Tensor &mask,
                                    const at::Tensor &grad_output,
                                    const int kernel_h,
                                    const int kernel_w,
                                    const int stride_h,
                                    const int stride_w,
                                    const int pad_h,
                                    const int pad_w,
                                    const int dilation_h,
                                    const int dilation_w,
                                    const int group,
                                    const int deformable_group,
                                    const int im2col_step)
{
    AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
    AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
    AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
    AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
    AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
    AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
    AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");

    const int batch = input.size(0);
    const int channels = input.size(1);
    const int height = input.size(2);
    const int width = input.size(3);

    const int channels_out = weight.size(0);
    const int channels_kernel = weight.size(1);
    const int kernel_h_ = weight.size(2);
    const int kernel_w_ = weight.size(3);

    const int batch_ = grad_output.size(0);
    const int channels_out_ = grad_output.size(1);
    const int height_out_ = grad_output.size(2);
    const int width_out_ = grad_output.size(3);

    const int im2col_step_ = std::min(im2col_step, batch);

    AT_ASSERTM(batch % im2col_step_ == 0,
               "batch(%d) must divide im2col_step(%d)", batch, im2col_step_)
    AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
               "channels(%d) and channels_out(%d) must divide group(%d)",
               channels, channels_out, group)

    // BUGFIX: same format-argument typo as in the forward pass.
    AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape wont match: (%d x %d vs %d x %d).",
               kernel_h, kernel_w, kernel_h_, kernel_w_);

    AT_ASSERTM(channels == (channels_kernel * group),
               "Input shape and kernel channels wont match: (%d vs %d).",
               channels, channels_kernel * group);

    const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
    const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;

    AT_ASSERTM(batch == batch_,
               "Input shape and grad_out batch wont match: (%d vs %d).",
               batch, batch_);
    AT_ASSERTM(channels_out == channels_out_,
               "Input shape and grad_out channels_out wont match: (%d vs %d).",
               channels_out, channels_out_);
    AT_ASSERTM(height_out == height_out_ && width_out == width_out_,
               "Input shape and grad_out shape wont match: (%d x %d vs %d x %d).",
               height_out, height_out_, width_out, width_out_);

    // NOTE: removed two dead full-batch allocations (`ones`, `columns`) that
    // were immediately shadowed by the per-chunk versions inside the loop.
    auto grad_input = at::zeros_like(input);
    auto grad_weight = at::zeros_like(weight);
    auto grad_bias = at::zeros_like(bias);
    auto grad_offset = at::zeros_like(offset);
    auto grad_mask = at::zeros_like(mask);

    // prepare group weight and bias
    auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
    auto grad_weight_g = grad_weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
    auto grad_bias_g = grad_bias.view({group, channels_out/group});

    const int batch_n = im2col_step_;
    const int per_input_size = channels * height * width;
    const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
    const int per_mask_size = mask.size(1) * mask.size(2) * mask.size(3);
    auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, height_out, width_out});
    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, height_out, width_out});
        auto ones = at::ones({batch_n * height_out * width_out}, input.options());
        auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * 1 * height_out * width_out}, input.options());
        auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});

        // columns = weight^T x grad_output (per group)
        for (int g = 0; g < group; ++g)
        {
            auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
            auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
            columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm);
        }

        AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_backward_cuda", ([&] {
            // gradient w.r.t. offset and mask
            modulated_deformable_col2im_coord_cuda(at::cuda::getCurrentCUDAStream(),
                                                   columns.data<scalar_t>(),
                                                   input.data<scalar_t>() + n * im2col_step_ * per_input_size,
                                                   offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
                                                   mask.data<scalar_t>() + n * im2col_step_ * per_mask_size,
                                                   batch_n, channels, height, width,
                                                   height_out, width_out, kernel_h, kernel_w,
                                                   pad_h, pad_w, stride_h, stride_w,
                                                   dilation_h, dilation_w, deformable_group,
                                                   grad_offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
                                                   grad_mask.data<scalar_t>() + n * im2col_step_ * per_mask_size);
            // gradient w.r.t. input data
            modulated_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
                                             columns.data<scalar_t>(),
                                             offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
                                             mask.data<scalar_t>() + n * im2col_step_ * per_mask_size,
                                             batch_n, channels, height, width,
                                             height_out, width_out, kernel_h, kernel_w,
                                             pad_h, pad_w, stride_h, stride_w,
                                             dilation_h, dilation_w, deformable_group,
                                             grad_input.data<scalar_t>() + n * im2col_step_ * per_input_size);
            // gradient w.r.t. weight, dWeight should accumulate across the batch and group
            modulated_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
                                             input.data<scalar_t>() + n * im2col_step_ * per_input_size,
                                             offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
                                             mask.data<scalar_t>() + n * im2col_step_ * per_mask_size,
                                             batch_n, channels, height, width,
                                             height_out, width_out, kernel_h, kernel_w,
                                             pad_h, pad_w, stride_h, stride_w,
                                             dilation_h, dilation_w, deformable_group,
                                             columns.data<scalar_t>());
        }));

        // auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out});
        // grad_weight = at::mm(grad_output_m, columns.t()).view_as(weight);
        // grad_bias = at::mv(grad_output_m, ones);
        // auto grad_output_g = grad_output.view({batch, group, channels_out/group, height_out, width_out});
        // auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch * height_out * width_out});
        for (int g = 0; g < group; ++g)
        {
            auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
            auto columns_gm = columns_g.select(0, g).t();
            auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w});
            auto grad_bias_gm = grad_bias_g.select(0, g);
            grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g));
            grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, grad_output_gm, ones);
        }
    }

    return {
        grad_input, grad_offset, grad_mask, grad_weight, grad_bias
    };
}
ef620b2cd15e8efd7de8e605486d9d9e6e419f5b.hip
// !!! This is a file automatically generated by hipify!!! #include "CUDA_WRAPPER.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" bool allocateCUDAFloatBuffer(float* ptr, float** dev_ptr, index_t numFloats) { hipError_t deviceStatus; deviceStatus = hipMalloc((void**)dev_ptr, numFloats * sizeof(float)); if (!checkCudaErrorStatus(deviceStatus, "hipMalloc")) { return false; } else { return true; } } bool freeCUDAFloatBuffer(float* dev_ptr) { hipError_t deviceStatus; deviceStatus = hipFree(dev_ptr); if (!checkCudaErrorStatus(deviceStatus, "hipMalloc")) { return false; } else { return true; } } bool copyMem2Device(float* dev_ptr, float* ptr, index_t numFloats) { hipError_t deviceStatus; deviceStatus = hipMemcpy(dev_ptr, ptr, numFloats * sizeof(float), hipMemcpyHostToDevice); if (!checkCudaErrorStatus(deviceStatus, "hipMemcpy (hipMemcpyHostToDevice)")) { return false; } else { return true; } } bool copyMemFromDevice(float* ptr, float* dev_ptr, index_t numFloats) { hipError_t deviceStatus; deviceStatus = hipMemcpy(ptr, dev_ptr, numFloats * sizeof(float), hipMemcpyDeviceToHost); if (!checkCudaErrorStatus(deviceStatus, "hipMemcpy (hipMemcpyDeviceToHost)")) { return false; } else { return true; } }
ef620b2cd15e8efd7de8e605486d9d9e6e419f5b.cu
#include "CUDA_WRAPPER.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" bool allocateCUDAFloatBuffer(float* ptr, float** dev_ptr, index_t numFloats) { cudaError_t deviceStatus; deviceStatus = cudaMalloc((void**)dev_ptr, numFloats * sizeof(float)); if (!checkCudaErrorStatus(deviceStatus, "cudaMalloc")) { return false; } else { return true; } } bool freeCUDAFloatBuffer(float* dev_ptr) { cudaError_t deviceStatus; deviceStatus = cudaFree(dev_ptr); if (!checkCudaErrorStatus(deviceStatus, "cudaMalloc")) { return false; } else { return true; } } bool copyMem2Device(float* dev_ptr, float* ptr, index_t numFloats) { cudaError_t deviceStatus; deviceStatus = cudaMemcpy(dev_ptr, ptr, numFloats * sizeof(float), cudaMemcpyHostToDevice); if (!checkCudaErrorStatus(deviceStatus, "cudaMemcpy (cudaMemcpyHostToDevice)")) { return false; } else { return true; } } bool copyMemFromDevice(float* ptr, float* dev_ptr, index_t numFloats) { cudaError_t deviceStatus; deviceStatus = cudaMemcpy(ptr, dev_ptr, numFloats * sizeof(float), cudaMemcpyDeviceToHost); if (!checkCudaErrorStatus(deviceStatus, "cudaMemcpy (cudaMemcpyDeviceToHost)")) { return false; } else { return true; } }
b4f1fc2cf75b785bba76d9e2931d47ec41002f6c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) 2013 Azlos Corporation * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/> */ /* * hausdorff.cu * * Created on: Sep 22, 2013 * Author: Paolo Galbiati */ #include "platform_config.h" #include "adj_path.h" #include "hausdorff.h" template <uint32_t BLOCK_SIZE, uint32_t ITERATIONS_PER_THREAD> __global__ void hausdorffCUDA(float* res, const path_point_t* p0, const path_point_t* p1, uint32_t points0, uint32_t points1) { const float EARTH_RADIUS = 6371.0f; // Block index uint32_t bx = blockIdx.x; uint32_t by = blockIdx.y; // Thread index uint32_t tx = threadIdx.x; uint32_t ty = threadIdx.y; uint32_t x = bx * BLOCK_SIZE + tx; uint32_t y = ITERATIONS_PER_THREAD * (by * BLOCK_SIZE + ty); #pragma unroll for (uint32_t i = y; i < y + ITERATIONS_PER_THREAD; i++) { if ((x < points0) && (i < points1)) { float delta_lat = (p1[i].lat - p0[x].lat) * 0.5f; float delta_lon = (p1[i].lon - p0[x].lon) * 0.5f; float tmp0 = __sinf(delta_lat); float tmp1 = __sinf(delta_lon); float a = tmp0 * tmp0 + __cosf(p0[x].lat) * __cosf(p1[i].lat) * tmp1 * tmp1; float c = 2 * atan2f(sqrtf(a), sqrtf(1 - a)); *(res + x * points1 + i) = EARTH_RADIUS * c; } } } void hausdorffGPU(float* res, const path_point_t* p0, const path_point_t* p1, uint32_t points0, uint32_t points1) { const uint32_t BLOCK_SIZE = 32; const uint32_t 
ITERATIONS_PER_THREAD = 8; // Setup execution parameters dim3 threads(BLOCK_SIZE, BLOCK_SIZE); uint32_t dim_x = points0 / threads.x; uint32_t dim_y = points1 / threads.y; if (points0 % threads.x) { dim_x += 1; } if (points1 % threads.y) { dim_y += 1; } if (dim_y % ITERATIONS_PER_THREAD == 0) { dim_y = dim_y / ITERATIONS_PER_THREAD; } else { dim_y = dim_y / ITERATIONS_PER_THREAD + 1; } dim3 grid(dim_x, dim_y); hipLaunchKernelGGL(( hausdorffCUDA<BLOCK_SIZE, ITERATIONS_PER_THREAD>), dim3(grid), dim3(threads) , 0, 0, res, p0, p1, points0, points1); } float hausdorff_gpu::distance_impl(const shared_ptr<gpu_device> gpu, const adj_path& p0, const adj_path& p1) { uint32_t points0 = p0.get_points_number(); uint32_t points1 = p1.get_points_number(); shared_ptr<float> results(new float[points0 * points1]); float* results_ptr = results.get(); float dist = 0.0f; /* Allocate GPU buffer */ uint32_t data_size = (points0 * points1) * sizeof(float); float* result_buffer = nullptr; if (false == gpu->gpu_device_malloc((void**)&result_buffer, data_size)) { return dist; } hausdorffGPU(result_buffer, p0.get_device_data(), p1.get_device_data(), points0, points1); gpu->gpu_device_synchronize(); // Copy result from device to host if (true == gpu->gpu_memcpy(results.get(), result_buffer, data_size, gpu_memcpy_device_to_host)) { dist = maxmin_impl(results, points0, points1); } gpu->gpu_device_free(result_buffer); return dist; }
b4f1fc2cf75b785bba76d9e2931d47ec41002f6c.cu
/* * Copyright (C) 2013 Azlos Corporation * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/> */ /* * hausdorff.cu * * Created on: Sep 22, 2013 * Author: Paolo Galbiati */ #include "platform_config.h" #include "adj_path.h" #include "hausdorff.h" template <uint32_t BLOCK_SIZE, uint32_t ITERATIONS_PER_THREAD> __global__ void hausdorffCUDA(float* res, const path_point_t* p0, const path_point_t* p1, uint32_t points0, uint32_t points1) { const float EARTH_RADIUS = 6371.0f; // Block index uint32_t bx = blockIdx.x; uint32_t by = blockIdx.y; // Thread index uint32_t tx = threadIdx.x; uint32_t ty = threadIdx.y; uint32_t x = bx * BLOCK_SIZE + tx; uint32_t y = ITERATIONS_PER_THREAD * (by * BLOCK_SIZE + ty); #pragma unroll for (uint32_t i = y; i < y + ITERATIONS_PER_THREAD; i++) { if ((x < points0) && (i < points1)) { float delta_lat = (p1[i].lat - p0[x].lat) * 0.5f; float delta_lon = (p1[i].lon - p0[x].lon) * 0.5f; float tmp0 = __sinf(delta_lat); float tmp1 = __sinf(delta_lon); float a = tmp0 * tmp0 + __cosf(p0[x].lat) * __cosf(p1[i].lat) * tmp1 * tmp1; float c = 2 * atan2f(sqrtf(a), sqrtf(1 - a)); *(res + x * points1 + i) = EARTH_RADIUS * c; } } } void hausdorffGPU(float* res, const path_point_t* p0, const path_point_t* p1, uint32_t points0, uint32_t points1) { const uint32_t BLOCK_SIZE = 32; const uint32_t ITERATIONS_PER_THREAD = 8; // Setup execution parameters dim3 threads(BLOCK_SIZE, BLOCK_SIZE); 
uint32_t dim_x = points0 / threads.x; uint32_t dim_y = points1 / threads.y; if (points0 % threads.x) { dim_x += 1; } if (points1 % threads.y) { dim_y += 1; } if (dim_y % ITERATIONS_PER_THREAD == 0) { dim_y = dim_y / ITERATIONS_PER_THREAD; } else { dim_y = dim_y / ITERATIONS_PER_THREAD + 1; } dim3 grid(dim_x, dim_y); hausdorffCUDA<BLOCK_SIZE, ITERATIONS_PER_THREAD><<< grid, threads >>>(res, p0, p1, points0, points1); } float hausdorff_gpu::distance_impl(const shared_ptr<gpu_device> gpu, const adj_path& p0, const adj_path& p1) { uint32_t points0 = p0.get_points_number(); uint32_t points1 = p1.get_points_number(); shared_ptr<float> results(new float[points0 * points1]); float* results_ptr = results.get(); float dist = 0.0f; /* Allocate GPU buffer */ uint32_t data_size = (points0 * points1) * sizeof(float); float* result_buffer = nullptr; if (false == gpu->gpu_device_malloc((void**)&result_buffer, data_size)) { return dist; } hausdorffGPU(result_buffer, p0.get_device_data(), p1.get_device_data(), points0, points1); gpu->gpu_device_synchronize(); // Copy result from device to host if (true == gpu->gpu_memcpy(results.get(), result_buffer, data_size, gpu_memcpy_device_to_host)) { dist = maxmin_impl(results, points0, points1); } gpu->gpu_device_free(result_buffer); return dist; }
0dbd46642a080fe03c54f1496bb50fe7788b5578.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // axpy kernel for matrices stored in the MAGMA format __global__ void zgedensereimsplit_kernel( int num_rows, int num_cols, magma_index_t* rowidx, magmaDoubleComplex * A, magmaDoubleComplex * ReA, magmaDoubleComplex * ImA ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if( row<num_rows ){ for( j=0; j<num_cols; j++ ){ ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 ); ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 ); } } } /** Purpose ------- This routine takes an input matrix A in DENSE format and located on the GPU and splits it into two matrixes ReA and ImA containing the real and the imaginary contributions of A. The output matrices are allocated within the routine. Arguments --------- @param[in] A magma_z_matrix input matrix A. @param[out] ReA magma_z_matrix* output matrix contaning real contributions. @param[out] ImA magma_z_matrix* output matrix contaning complex contributions. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgedensereimsplit( magma_z_matrix A, magma_z_matrix *ReA, magma_z_matrix *ImA, magma_queue_t queue ) { magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue ); magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue ); int m = A.num_rows; int n = A.num_cols; dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( zgedensereimsplit_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, A.row, A.dval, ReA->dval, ImA->dval ); return MAGMA_SUCCESS; }
0dbd46642a080fe03c54f1496bb50fe7788b5578.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // axpy kernel for matrices stored in the MAGMA format __global__ void zgedensereimsplit_kernel( int num_rows, int num_cols, magma_index_t* rowidx, magmaDoubleComplex * A, magmaDoubleComplex * ReA, magmaDoubleComplex * ImA ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if( row<num_rows ){ for( j=0; j<num_cols; j++ ){ ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 ); ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 ); } } } /** Purpose ------- This routine takes an input matrix A in DENSE format and located on the GPU and splits it into two matrixes ReA and ImA containing the real and the imaginary contributions of A. The output matrices are allocated within the routine. Arguments --------- @param[in] A magma_z_matrix input matrix A. @param[out] ReA magma_z_matrix* output matrix contaning real contributions. @param[out] ImA magma_z_matrix* output matrix contaning complex contributions. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgedensereimsplit( magma_z_matrix A, magma_z_matrix *ReA, magma_z_matrix *ImA, magma_queue_t queue ) { magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue ); magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue ); int m = A.num_rows; int n = A.num_cols; dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; zgedensereimsplit_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, A.row, A.dval, ReA->dval, ImA->dval ); return MAGMA_SUCCESS; }
fdff8a4d5d87caca750ed62bd7de9add371c3b5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layers/bnll_layer.hpp" namespace caffe { const float kBNLL_THRESHOLD = 50.; template <typename Dtype> __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); } } template <typename Dtype> void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( BNLLForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void BNLLBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { const float kBNLL_THRESHOLD = 50.0F; Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); out_diff[index] = in_diff[index] * expval / (expval + 1.); } } template <typename Dtype> void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( BNLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer); } // namespace caffe
fdff8a4d5d87caca750ed62bd7de9add371c3b5c.cu
#include <algorithm> #include <vector> #include "caffe/layers/bnll_layer.hpp" namespace caffe { const float kBNLL_THRESHOLD = 50.; template <typename Dtype> __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); } } template <typename Dtype> void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void BNLLBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { const float kBNLL_THRESHOLD = 50.0F; Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); out_diff[index] = in_diff[index] * expval / (expval + 1.); } } template <typename Dtype> void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer); } // namespace caffe
8c79bc60f6ecd31ede683efbd6afdb345d8feaf5.hip
// !!! This is a file automatically generated by hipify!!! // ======================================================================== // // Copyright 2018-2019 Ingo Wald // // // // Licensed under the Apache License, Version 2.0 (the "License"); // // you may not use this file except in compliance with the License. // // You may obtain a copy of the License at // // // // http://www.apache.org/licenses/LICENSE-2.0 // // // // Unless required by applicable law or agreed to in writing, software // // distributed under the License is distributed on an "AS IS" BASIS, // // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // // limitations under the License. // // ======================================================================== // #include <hip/hip_runtime.h> #include <optix_device.h> #include "lcg.h" #include "vec.h" enum { RADIANCE_RAY_TYPE = 0, SHADOW_RAY_TYPE, RAY_TYPE_COUNT }; namespace osc { #include "launch_params.h" } using namespace osc; #define NUM_LIGHT_SAMPLES 16 #define NUM_PIXEL_SAMPLES 4 namespace osc { typedef LCG<16> Random; /*! launch parameters in constant memory, filled in by optix upon optixLaunch (this gets filled in from the buffer we pass to optixLaunch) */ extern "C" __constant__ LaunchParams optixLaunchParams; /*! 
per-ray data now captures random numebr generator, so programs can access RNG state */ struct PRD { Random random; V3f32 pixelColor; }; static __forceinline__ DEVICE void* unpackPointer(u32 i0, u32 i1) { const u64 uptr = static_cast<u64>(i0) << 32 | i1; void* ptr = reinterpret_cast<void*>(uptr); return ptr; } static __forceinline__ DEVICE void packPointer(void* ptr, u32& i0, u32& i1) { const u64 uptr = reinterpret_cast<u64>(ptr); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } template <typename T> static __forceinline__ DEVICE T* getPRD() { const u32 u0 = optixGetPayload_0(); const u32 u1 = optixGetPayload_1(); return reinterpret_cast<T*>(unpackPointer(u0, u1)); } //------------------------------------------------------------------------------ // closest hit and anyhit programs for radiance-type rays. // // Note eventually we will have to create one pair of those for each // ray type and each geometry type we want to render; but this // simple example doesn't use any actual geometries yet, so we only // create a single, dummy, set of them (we do have to have at least // one group of them to set up the SBT) //------------------------------------------------------------------------------ extern "C" __global__ void __closesthit__shadow() { /* not going to be used ... 
*/ } extern "C" __global__ void __closesthit__radiance() { const TriangleMeshSBTData& sbtData = *(const TriangleMeshSBTData*)optixGetSbtDataPointer(); PRD& prd = *getPRD<PRD>(); // ------------------------------------------------------------------ // gather some basic hit information // ------------------------------------------------------------------ const i32 primID = optixGetPrimitiveIndex(); const V3i32 index = sbtData.index[primID]; const f32 u = optixGetTriangleBarycentrics().x; const f32 v = optixGetTriangleBarycentrics().y; // ------------------------------------------------------------------ // compute normal, using either shading normal (if avail), or // geometry normal (fallback) // ------------------------------------------------------------------ const V3f32& A = sbtData.vertex[index.x]; const V3f32& B = sbtData.vertex[index.y]; const V3f32& C = sbtData.vertex[index.z]; V3f32 Ng = cross(B - A, C - A); V3f32 Ns = (sbtData.normal.is_null()) ? Ng : ((1.f - u - v) * sbtData.normal[index.x] + u * sbtData.normal[index.y] + v * sbtData.normal[index.z]); // ------------------------------------------------------------------ // face-forward and normalize normals // ------------------------------------------------------------------ const V3f32 rayDir = optixGetWorldRayDirection(); if (dot(rayDir, Ng) > 0.f) Ng = -Ng; Ng = normalize(Ng); if (dot(Ng, Ns) < 0.f) Ns = Ns - 2.f * dot(Ng, Ns) * Ng; Ns = normalize(Ns); // ------------------------------------------------------------------ // compute diffuse material color, including diffuse texture, if // available // ------------------------------------------------------------------ V3f32 diffuseColor = sbtData.color; if (sbtData.has_texture && !sbtData.texcoord.is_null()) { const V2f32 tc = (1.f - u - v) * sbtData.texcoord[index.x] + u * sbtData.texcoord[index.y] + v * sbtData.texcoord[index.z]; V4f32 fromTexture = tex2D<float4>(sbtData.texture, tc.x, tc.y); diffuseColor = diffuseColor * fromTexture.xyz(); } // start 
with some ambient term V3f32 pixelColor = (0.01f + 0.1f * fabsf(dot(Ns, rayDir))) * diffuseColor; // ------------------------------------------------------------------ // compute shadow // ------------------------------------------------------------------ const V3f32 surfPos = (1.f - u - v) * sbtData.vertex[index.x] + u * sbtData.vertex[index.y] + v * sbtData.vertex[index.z]; const i32 numLightSamples = NUM_LIGHT_SAMPLES; for (i32 lightSampleID = 0; lightSampleID < numLightSamples; lightSampleID++) { // produce random light sample const V3f32 lightPos = optixLaunchParams.light.origin + prd.random() * optixLaunchParams.light.du + prd.random() * optixLaunchParams.light.dv; V3f32 lightDir = lightPos - surfPos; f32 lightDist = lightDir.length(); lightDir = normalize(lightDir); // trace shadow ray: const f32 NdotL = dot(lightDir, Ns); if (NdotL >= 0.f) { V3f32 lightVisibility(1.f); // the values we store the PRD poi32er in: u32 u0, u1; packPointer(&lightVisibility, u0, u1); optixTrace(optixLaunchParams.traversable, surfPos + 1e-3f * Ng, lightDir, 1e-3f, // tmin lightDist * (1.f - 1e-3f), // tmax 0.0f, // rayTime OptixVisibilityMask(255), // anyhit ON for shadow rays: OPTIX_RAY_FLAG_NONE, SHADOW_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride SHADOW_RAY_TYPE, // missSBTIndex u0, u1); pixelColor = pixelColor + lightVisibility * optixLaunchParams.light.power * diffuseColor * (NdotL / (lightDist * lightDist * numLightSamples)); } } prd.pixelColor = pixelColor; } extern "C" __global__ void __anyhit__radiance() { /*! 
for this simple example, this will remain empty */ } extern "C" __global__ void __anyhit__shadow() { // in this simple example, we terminate on ANY hit V3f32& prd = *getPRD<V3f32>(); prd = V3f32(0.f); optixTerminateRay(); } //------------------------------------------------------------------------------ // miss program that gets called for any ray that did not have a // valid i32ersection // // as with the anyhit/closest hit programs, in this example we only // need to have _some_ dummy function to set up a valid SBT // ------------------------------------------------------------------------------ extern "C" __global__ void __miss__radiance() { PRD& prd = *getPRD<PRD>(); // set to constant white as background color prd.pixelColor = V3f32(1.f); } extern "C" __global__ void __miss__shadow() { // misses shouldn't mess with shadow opacity - do nothing } //------------------------------------------------------------------------------ // ray gen program - the actual rendering happens in here //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__renderFrame() { // compute a test pattern based on pixel ID const i32 ix = optixGetLaunchIndex().x; const i32 iy = optixGetLaunchIndex().y; const i32 accum_id = optixLaunchParams.frame.accum_id; const auto& camera = optixLaunchParams.camera; PRD prd; prd.random.init(ix + accum_id * optixLaunchParams.frame.size.x, iy + accum_id * optixLaunchParams.frame.size.y); prd.pixelColor = V3f32(0.f); // the values we store the PRD poi32er in: u32 u0, u1; packPointer(&prd, u0, u1); i32 numPixelSamples = NUM_PIXEL_SAMPLES; V3f32 pixelColor(0.f); for (i32 sampleID = 0; sampleID < numPixelSamples; sampleID++) { // normalized screen plane position, in [0,1]^2 const V2f32 screen(V2f32(ix + prd.random(), iy + prd.random()) / V2f32(optixLaunchParams.frame.size.x, optixLaunchParams.frame.size.y)); // generate ray direction V3f32 rayDir = normalize(camera.direction + (screen.x - 0.5f) * 
camera.horizontal + (screen.y - 0.5f) * camera.vertical); optixTrace(optixLaunchParams.traversable, camera.position, rayDir, 0.f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, // OPTIX_RAY_FLAG_NONE, RADIANCE_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride RADIANCE_RAY_TYPE, // missSBTIndex u0, u1); pixelColor = pixelColor + prd.pixelColor; } const u32 fbIndex = ix + iy * optixLaunchParams.frame.size.x; optixLaunchParams.frame.color_buffer[fbIndex] = make_float4( pixelColor.x / numPixelSamples, pixelColor.y / numPixelSamples, pixelColor.z / numPixelSamples, 1.0f); } } // namespace osc
8c79bc60f6ecd31ede683efbd6afdb345d8feaf5.cu
// ======================================================================== // // Copyright 2018-2019 Ingo Wald // // // // Licensed under the Apache License, Version 2.0 (the "License"); // // you may not use this file except in compliance with the License. // // You may obtain a copy of the License at // // // // http://www.apache.org/licenses/LICENSE-2.0 // // // // Unless required by applicable law or agreed to in writing, software // // distributed under the License is distributed on an "AS IS" BASIS, // // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // // limitations under the License. // // ======================================================================== // #include <cuda_runtime.h> #include <optix_device.h> #include "lcg.h" #include "vec.h" enum { RADIANCE_RAY_TYPE = 0, SHADOW_RAY_TYPE, RAY_TYPE_COUNT }; namespace osc { #include "launch_params.h" } using namespace osc; #define NUM_LIGHT_SAMPLES 16 #define NUM_PIXEL_SAMPLES 4 namespace osc { typedef LCG<16> Random; /*! launch parameters in constant memory, filled in by optix upon optixLaunch (this gets filled in from the buffer we pass to optixLaunch) */ extern "C" __constant__ LaunchParams optixLaunchParams; /*! 
per-ray data now captures random numebr generator, so programs can access RNG state */ struct PRD { Random random; V3f32 pixelColor; }; static __forceinline__ DEVICE void* unpackPointer(u32 i0, u32 i1) { const u64 uptr = static_cast<u64>(i0) << 32 | i1; void* ptr = reinterpret_cast<void*>(uptr); return ptr; } static __forceinline__ DEVICE void packPointer(void* ptr, u32& i0, u32& i1) { const u64 uptr = reinterpret_cast<u64>(ptr); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } template <typename T> static __forceinline__ DEVICE T* getPRD() { const u32 u0 = optixGetPayload_0(); const u32 u1 = optixGetPayload_1(); return reinterpret_cast<T*>(unpackPointer(u0, u1)); } //------------------------------------------------------------------------------ // closest hit and anyhit programs for radiance-type rays. // // Note eventually we will have to create one pair of those for each // ray type and each geometry type we want to render; but this // simple example doesn't use any actual geometries yet, so we only // create a single, dummy, set of them (we do have to have at least // one group of them to set up the SBT) //------------------------------------------------------------------------------ extern "C" __global__ void __closesthit__shadow() { /* not going to be used ... 
*/ } extern "C" __global__ void __closesthit__radiance() { const TriangleMeshSBTData& sbtData = *(const TriangleMeshSBTData*)optixGetSbtDataPointer(); PRD& prd = *getPRD<PRD>(); // ------------------------------------------------------------------ // gather some basic hit information // ------------------------------------------------------------------ const i32 primID = optixGetPrimitiveIndex(); const V3i32 index = sbtData.index[primID]; const f32 u = optixGetTriangleBarycentrics().x; const f32 v = optixGetTriangleBarycentrics().y; // ------------------------------------------------------------------ // compute normal, using either shading normal (if avail), or // geometry normal (fallback) // ------------------------------------------------------------------ const V3f32& A = sbtData.vertex[index.x]; const V3f32& B = sbtData.vertex[index.y]; const V3f32& C = sbtData.vertex[index.z]; V3f32 Ng = cross(B - A, C - A); V3f32 Ns = (sbtData.normal.is_null()) ? Ng : ((1.f - u - v) * sbtData.normal[index.x] + u * sbtData.normal[index.y] + v * sbtData.normal[index.z]); // ------------------------------------------------------------------ // face-forward and normalize normals // ------------------------------------------------------------------ const V3f32 rayDir = optixGetWorldRayDirection(); if (dot(rayDir, Ng) > 0.f) Ng = -Ng; Ng = normalize(Ng); if (dot(Ng, Ns) < 0.f) Ns = Ns - 2.f * dot(Ng, Ns) * Ng; Ns = normalize(Ns); // ------------------------------------------------------------------ // compute diffuse material color, including diffuse texture, if // available // ------------------------------------------------------------------ V3f32 diffuseColor = sbtData.color; if (sbtData.has_texture && !sbtData.texcoord.is_null()) { const V2f32 tc = (1.f - u - v) * sbtData.texcoord[index.x] + u * sbtData.texcoord[index.y] + v * sbtData.texcoord[index.z]; V4f32 fromTexture = tex2D<float4>(sbtData.texture, tc.x, tc.y); diffuseColor = diffuseColor * fromTexture.xyz(); } // start 
with some ambient term V3f32 pixelColor = (0.01f + 0.1f * fabsf(dot(Ns, rayDir))) * diffuseColor; // ------------------------------------------------------------------ // compute shadow // ------------------------------------------------------------------ const V3f32 surfPos = (1.f - u - v) * sbtData.vertex[index.x] + u * sbtData.vertex[index.y] + v * sbtData.vertex[index.z]; const i32 numLightSamples = NUM_LIGHT_SAMPLES; for (i32 lightSampleID = 0; lightSampleID < numLightSamples; lightSampleID++) { // produce random light sample const V3f32 lightPos = optixLaunchParams.light.origin + prd.random() * optixLaunchParams.light.du + prd.random() * optixLaunchParams.light.dv; V3f32 lightDir = lightPos - surfPos; f32 lightDist = lightDir.length(); lightDir = normalize(lightDir); // trace shadow ray: const f32 NdotL = dot(lightDir, Ns); if (NdotL >= 0.f) { V3f32 lightVisibility(1.f); // the values we store the PRD poi32er in: u32 u0, u1; packPointer(&lightVisibility, u0, u1); optixTrace(optixLaunchParams.traversable, surfPos + 1e-3f * Ng, lightDir, 1e-3f, // tmin lightDist * (1.f - 1e-3f), // tmax 0.0f, // rayTime OptixVisibilityMask(255), // anyhit ON for shadow rays: OPTIX_RAY_FLAG_NONE, SHADOW_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride SHADOW_RAY_TYPE, // missSBTIndex u0, u1); pixelColor = pixelColor + lightVisibility * optixLaunchParams.light.power * diffuseColor * (NdotL / (lightDist * lightDist * numLightSamples)); } } prd.pixelColor = pixelColor; } extern "C" __global__ void __anyhit__radiance() { /*! 
for this simple example, this will remain empty */ } extern "C" __global__ void __anyhit__shadow() { // in this simple example, we terminate on ANY hit V3f32& prd = *getPRD<V3f32>(); prd = V3f32(0.f); optixTerminateRay(); } //------------------------------------------------------------------------------ // miss program that gets called for any ray that did not have a // valid i32ersection // // as with the anyhit/closest hit programs, in this example we only // need to have _some_ dummy function to set up a valid SBT // ------------------------------------------------------------------------------ extern "C" __global__ void __miss__radiance() { PRD& prd = *getPRD<PRD>(); // set to constant white as background color prd.pixelColor = V3f32(1.f); } extern "C" __global__ void __miss__shadow() { // misses shouldn't mess with shadow opacity - do nothing } //------------------------------------------------------------------------------ // ray gen program - the actual rendering happens in here //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__renderFrame() { // compute a test pattern based on pixel ID const i32 ix = optixGetLaunchIndex().x; const i32 iy = optixGetLaunchIndex().y; const i32 accum_id = optixLaunchParams.frame.accum_id; const auto& camera = optixLaunchParams.camera; PRD prd; prd.random.init(ix + accum_id * optixLaunchParams.frame.size.x, iy + accum_id * optixLaunchParams.frame.size.y); prd.pixelColor = V3f32(0.f); // the values we store the PRD poi32er in: u32 u0, u1; packPointer(&prd, u0, u1); i32 numPixelSamples = NUM_PIXEL_SAMPLES; V3f32 pixelColor(0.f); for (i32 sampleID = 0; sampleID < numPixelSamples; sampleID++) { // normalized screen plane position, in [0,1]^2 const V2f32 screen(V2f32(ix + prd.random(), iy + prd.random()) / V2f32(optixLaunchParams.frame.size.x, optixLaunchParams.frame.size.y)); // generate ray direction V3f32 rayDir = normalize(camera.direction + (screen.x - 0.5f) * 
camera.horizontal + (screen.y - 0.5f) * camera.vertical); optixTrace(optixLaunchParams.traversable, camera.position, rayDir, 0.f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, // OPTIX_RAY_FLAG_NONE, RADIANCE_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride RADIANCE_RAY_TYPE, // missSBTIndex u0, u1); pixelColor = pixelColor + prd.pixelColor; } const u32 fbIndex = ix + iy * optixLaunchParams.frame.size.x; optixLaunchParams.frame.color_buffer[fbIndex] = make_float4( pixelColor.x / numPixelSamples, pixelColor.y / numPixelSamples, pixelColor.z / numPixelSamples, 1.0f); } } // namespace osc
2ede0f31acbc4b2aae0016252274fcf5c5fca840.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ float4 read(const uchar4 * in, int x, int y, int width) { const uchar4 v = in[x + y * width]; return make_float4(v.x, v.y, v.z, v.w); } __device__ float4 compute_sobelx(const uchar4 * in, int x, int y, int w) { return -2*read(in,x-1,y-1,w) - read(in,x-1,y,w) - read(in,x-1,y+1,w) +2*read(in,x+1,y-1,w) + read(in,x+1,y,w) + read(in,x+1,y+1,w); } __device__ float4 compute_sobely(const uchar4 * in, int x, int y, int w) { return -2*read(in,x-1,y-1,w) - read(in,x,y-1,w) - read(in,x+1,y-1,w) +2*read(in,x-1,y+1,w) + read(in,x,y+1,w) + read(in,x+1,y+1,w); } __global__ void sobel(const uchar4 * in, uchar4 * sobelx, uchar4 * sobely, uchar4 * gradient, uchar4 * edges, const float primary_treshold, const float secondary_treshold, const int width, const int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; // array index const int idx = x + width * y; // inside image bounds check if (x >= width || y >= height) { return; } // kernel code if (x == 0 || y == 0 || x == width - 1 || y == height - 1) { sobelx[idx] = make_uchar4(0,0,0,255); sobely[idx] = make_uchar4(0,0,0,255); gradient[idx] = make_uchar4(0,0,0,255); edges[idx] = make_uchar4(0,0,0,255); return; } float4 sx = compute_sobelx(in,x,y,width); float4 sy = compute_sobely(in,x,y,width); sobelx[idx] = make_uchar4(abs(sx.x), abs(sx.y), abs(sx.z), 255); sobely[idx] = make_uchar4(abs(sy.x), abs(sy.y), abs(sy.z), 255); float grad = (abs(sx.x)+abs(sx.y)+abs(sx.z)+ abs(sy.x)+abs(sy.y)+abs(sy.z))/3.0; gradient[idx] = make_uchar4(grad, grad, grad, 255); const bool prim_edge = grad > primary_treshold; const bool sec_edge = grad > secondary_treshold; const unsigned char edge = prim_edge * 255 | sec_edge * 125; edges[idx] = make_uchar4(edge,edge,edge,255); }
2ede0f31acbc4b2aae0016252274fcf5c5fca840.cu
__device__ float4 read(const uchar4 * in, int x, int y, int width) { const uchar4 v = in[x + y * width]; return make_float4(v.x, v.y, v.z, v.w); } __device__ float4 compute_sobelx(const uchar4 * in, int x, int y, int w) { return -2*read(in,x-1,y-1,w) - read(in,x-1,y,w) - read(in,x-1,y+1,w) +2*read(in,x+1,y-1,w) + read(in,x+1,y,w) + read(in,x+1,y+1,w); } __device__ float4 compute_sobely(const uchar4 * in, int x, int y, int w) { return -2*read(in,x-1,y-1,w) - read(in,x,y-1,w) - read(in,x+1,y-1,w) +2*read(in,x-1,y+1,w) + read(in,x,y+1,w) + read(in,x+1,y+1,w); } __global__ void sobel(const uchar4 * in, uchar4 * sobelx, uchar4 * sobely, uchar4 * gradient, uchar4 * edges, const float primary_treshold, const float secondary_treshold, const int width, const int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; // array index const int idx = x + width * y; // inside image bounds check if (x >= width || y >= height) { return; } // kernel code if (x == 0 || y == 0 || x == width - 1 || y == height - 1) { sobelx[idx] = make_uchar4(0,0,0,255); sobely[idx] = make_uchar4(0,0,0,255); gradient[idx] = make_uchar4(0,0,0,255); edges[idx] = make_uchar4(0,0,0,255); return; } float4 sx = compute_sobelx(in,x,y,width); float4 sy = compute_sobely(in,x,y,width); sobelx[idx] = make_uchar4(abs(sx.x), abs(sx.y), abs(sx.z), 255); sobely[idx] = make_uchar4(abs(sy.x), abs(sy.y), abs(sy.z), 255); float grad = (abs(sx.x)+abs(sx.y)+abs(sx.z)+ abs(sy.x)+abs(sy.y)+abs(sy.z))/3.0; gradient[idx] = make_uchar4(grad, grad, grad, 255); const bool prim_edge = grad > primary_treshold; const bool sec_edge = grad > secondary_treshold; const unsigned char edge = prim_edge * 255 | sec_edge * 125; edges[idx] = make_uchar4(edge,edge,edge,255); }
de3996c53124a1144cd3976979783f78890f3b95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } #define TB 128 #define DISP_MAX 256 __global__ void remove_nonvisible(float *y, int size, int size3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % size3; if (y[id] >= x) { y[id] = 0; } } }
de3996c53124a1144cd3976979783f78890f3b95.cu
#include "includes.h" extern "C" { } #define TB 128 #define DISP_MAX 256 __global__ void remove_nonvisible(float *y, int size, int size3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % size3; if (y[id] >= x) { y[id] = 0; } } }
b487975cc416bc399be3893546011985f4d8cb94.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cudaDoEigen.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *m = NULL; hipMalloc(&m, XSIZE*YSIZE); int rows = XSIZE; int columns = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cudaDoEigen), dim3(gridBlock),dim3(threadBlock), 0, 0, m,rows,columns); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cudaDoEigen), dim3(gridBlock),dim3(threadBlock), 0, 0, m,rows,columns); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cudaDoEigen), dim3(gridBlock),dim3(threadBlock), 0, 0, m,rows,columns); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b487975cc416bc399be3893546011985f4d8cb94.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cudaDoEigen.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *m = NULL; cudaMalloc(&m, XSIZE*YSIZE); int rows = XSIZE; int columns = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cudaDoEigen<<<gridBlock,threadBlock>>>(m,rows,columns); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cudaDoEigen<<<gridBlock,threadBlock>>>(m,rows,columns); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cudaDoEigen<<<gridBlock,threadBlock>>>(m,rows,columns); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1ad4c27201e986a597461963c4efb67815094ac5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "graphs.h" __host__ void GenerateAllGraphs(unsigned int initial_order, unsigned int final_order, int max_vertex_order, int lattice_type, graph*& graphs) { for( int i = initial_order; i < final_order, i++) { hipMalloc( (void**) &graphs[i], ); //put a formula for maximum number of distinct graphs here GenerateNewGraphs( graphs[i].adj_mat, i, graphs[i].count, graphs[i+1].adj_mat, graphs[i + 1].flags, max_vertex_order, lattice_type); thrust::device_ptr<int> flag_ptr(graphs[i+1].flags); graphs[i+1].count = thrust::reduce(flag_ptr, flag_ptr + ); thrust::sort_by_key(flag_ptr, flag_ptr + , graphs[i].adj_mat); } } __global__ void GenerateNewGraphs( int* old_graphs, int old_graph_order, unsigned int num_old_graphs, int* new_graphs, unsigned int* new_graphs_flags, int max_vertex_order, int lattice_type) { unsigned int gid = threadIdx.x + blockIdx.x * blockDim.x; unsigned int row = threadIdx.x % old_graph_order; int2 vertex_order; int matrix_size = 0; int row_start = 0; unsigned int block_start = matrix_size * (threadIdx.x / old_graph_order); for (int i = 0; i < old_graph_order; i++) { matrix_size += i; row_start += row > i ? (old_graph_order - i) : 0; } __shared__ int s_old_graphs[1024]; __shared__ int s_new_graphs[1024]; if (gid < num_old_graphs * old_graph_order) { for( int i = 0; i < old_graph_order - row; i++) { s_old_graphs[ block_start + row_start + i ] = old_graphs[ (gid / old_graph_order) * matrix_size + row_start + i ]; } vertex_order.x = s_old_graphs[ block_start + row_start ]; } __syncthreads(); int new_matrix_size = matrix_size + old_graph_order + 1; int new_matrix_start = (gid / old_graph_order)*new_matrix_size*(matrix_size - old_graph_order); int new_row_start = 0; if (gid < num_old_graphs * old_graph_order) { for( int i = 1; i < old_graph_order - row; i++) { for( int k = 0; k < old_graph_order; k++) { new_row_start += (i > k) ? 
k : 0; } vertex_order.y = s_old_graphs[block_start + new_row_start]; for ( int j = 0; j < new_matrix_size; j++) { new_graphs[ new_matrix_start + i*new_matrix_size + j ] = ( j == row_start + i) ? 1 : s_old_graphs[block_start + j]; } vertex_order.x += 1; vertex_order.y += 1; new_graphs_flags[ (gid/ ( old_graphs_order + 1 ) )*(matrix_size - old_graph_order) ] = (vertex_order.x <= max_vertex_order + 1) && (vertex_order.y <= max_vertex_order + 1); } } } __global__ void FindCanonicalGraphs( int* graphs, int graph_order ) { }
1ad4c27201e986a597461963c4efb67815094ac5.cu
#include "graphs.h" __host__ void GenerateAllGraphs(unsigned int initial_order, unsigned int final_order, int max_vertex_order, int lattice_type, graph*& graphs) { for( int i = initial_order; i < final_order, i++) { cudaMalloc( (void**) &graphs[i], ); //put a formula for maximum number of distinct graphs here GenerateNewGraphs( graphs[i].adj_mat, i, graphs[i].count, graphs[i+1].adj_mat, graphs[i + 1].flags, max_vertex_order, lattice_type); thrust::device_ptr<int> flag_ptr(graphs[i+1].flags); graphs[i+1].count = thrust::reduce(flag_ptr, flag_ptr + ); thrust::sort_by_key(flag_ptr, flag_ptr + , graphs[i].adj_mat); } } __global__ void GenerateNewGraphs( int* old_graphs, int old_graph_order, unsigned int num_old_graphs, int* new_graphs, unsigned int* new_graphs_flags, int max_vertex_order, int lattice_type) { unsigned int gid = threadIdx.x + blockIdx.x * blockDim.x; unsigned int row = threadIdx.x % old_graph_order; int2 vertex_order; int matrix_size = 0; int row_start = 0; unsigned int block_start = matrix_size * (threadIdx.x / old_graph_order); for (int i = 0; i < old_graph_order; i++) { matrix_size += i; row_start += row > i ? (old_graph_order - i) : 0; } __shared__ int s_old_graphs[1024]; __shared__ int s_new_graphs[1024]; if (gid < num_old_graphs * old_graph_order) { for( int i = 0; i < old_graph_order - row; i++) { s_old_graphs[ block_start + row_start + i ] = old_graphs[ (gid / old_graph_order) * matrix_size + row_start + i ]; } vertex_order.x = s_old_graphs[ block_start + row_start ]; } __syncthreads(); int new_matrix_size = matrix_size + old_graph_order + 1; int new_matrix_start = (gid / old_graph_order)*new_matrix_size*(matrix_size - old_graph_order); int new_row_start = 0; if (gid < num_old_graphs * old_graph_order) { for( int i = 1; i < old_graph_order - row; i++) { for( int k = 0; k < old_graph_order; k++) { new_row_start += (i > k) ? 
k : 0; } vertex_order.y = s_old_graphs[block_start + new_row_start]; for ( int j = 0; j < new_matrix_size; j++) { new_graphs[ new_matrix_start + i*new_matrix_size + j ] = ( j == row_start + i) ? 1 : s_old_graphs[block_start + j]; } vertex_order.x += 1; vertex_order.y += 1; new_graphs_flags[ (gid/ ( old_graphs_order + 1 ) )*(matrix_size - old_graph_order) ] = (vertex_order.x <= max_vertex_order + 1) && (vertex_order.y <= max_vertex_order + 1); } } } __global__ void FindCanonicalGraphs( int* graphs, int graph_order ) { }
0799c90262f75414d09ea817892ac615bcab2e9b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "psroi_pooling_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __global__ void PSROIPoolForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int group_size, const int output_dim, const float* bottom_rois, float* top_data, int* mapping_channel) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = static_cast<float>(round(bottom_rois[1])) * spatial_scale; float roi_start_h = static_cast<float>(round(bottom_rois[2])) * spatial_scale; float roi_end_w = static_cast<float>(round(bottom_rois[3]) + 1.) * spatial_scale; float roi_end_h = static_cast<float>(round(bottom_rois[4]) + 1.) 
* spatial_scale; // Force malformed ROIs to be 1x1 float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 float roi_height = max(roi_end_h - roi_start_h, 0.1); float bin_size_h = (float)(roi_height) / (float)(pooled_height); float bin_size_w = (float)(roi_width) / (float)(pooled_width); int hstart = floor(static_cast<float>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<float>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<float>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<float>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop*group_size + gh)*group_size + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; float out_sum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; out_sum += bottom_data[bottom_index]; } } float bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? 0. 
: out_sum/bin_area; mapping_channel[index] = c; } } int PSROIPoolForwardLauncher( const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const float* bottom_rois, const int group_size, const int output_dim, float* top_data, int* mapping_channel, hipStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = output_dim * pooled_height * pooled_width * num_rois; hipError_t err; hipLaunchKernelGGL(( PSROIPoolForward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, output_size, bottom_data, spatial_scale, height, width, channels, pooled_height, pooled_width, group_size, output_dim, bottom_rois, top_data, mapping_channel); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void PSROIPoolBackward(const int nthreads, const float* top_diff, const int* mapping_channel, const int num_rois, const float spatial_scale, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int output_dim, float* bottom_diff, const float* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = static_cast<float>(round(bottom_rois[1])) * spatial_scale; float roi_start_h = static_cast<float>(round(bottom_rois[2])) * spatial_scale; float roi_end_w = static_cast<float>(round(bottom_rois[3]) + 1.) * spatial_scale; float roi_end_h = static_cast<float>(round(bottom_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 float roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom float bin_size_h = roi_height / static_cast<float>(pooled_height); float bin_size_w = roi_width / static_cast<float>(pooled_width); int hstart = floor(static_cast<float>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<float>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<float>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<float>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; float* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; float bin_area = (hend - hstart)*(wend - wstart); float diff_val = is_empty ? 0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; //caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); atomicAdd(offset_bottom_diff + bottom_index, diff_val); } } } } int PSROIPoolBackwardLauncher(const float* top_diff, const int* mapping_channel, const int batch_size, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_width, const int pooled_height, const int output_dim, float* bottom_diff, const float* bottom_rois, hipStream_t stream) { const int kThreadsPerBlock = 1024; //const int output_size = output_dim * height * width * channels; const int output_size = output_dim * pooled_height * pooled_width * num_rois; hipError_t err; hipLaunchKernelGGL(( PSROIPoolBackward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, output_size, top_diff, mapping_channel, num_rois, spatial_scale, height, width, channels, pooled_height, pooled_width, output_dim, bottom_diff, bottom_rois); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
0799c90262f75414d09ea817892ac615bcab2e9b.cu
#ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "psroi_pooling_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __global__ void PSROIPoolForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int group_size, const int output_dim, const float* bottom_rois, float* top_data, int* mapping_channel) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = static_cast<float>(round(bottom_rois[1])) * spatial_scale; float roi_start_h = static_cast<float>(round(bottom_rois[2])) * spatial_scale; float roi_end_w = static_cast<float>(round(bottom_rois[3]) + 1.) * spatial_scale; float roi_end_h = static_cast<float>(round(bottom_rois[4]) + 1.) 
* spatial_scale; // Force malformed ROIs to be 1x1 float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 float roi_height = max(roi_end_h - roi_start_h, 0.1); float bin_size_h = (float)(roi_height) / (float)(pooled_height); float bin_size_w = (float)(roi_width) / (float)(pooled_width); int hstart = floor(static_cast<float>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<float>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<float>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<float>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop*group_size + gh)*group_size + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; float out_sum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; out_sum += bottom_data[bottom_index]; } } float bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? 0. 
: out_sum/bin_area; mapping_channel[index] = c; } } int PSROIPoolForwardLauncher( const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const float* bottom_rois, const int group_size, const int output_dim, float* top_data, int* mapping_channel, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = output_dim * pooled_height * pooled_width * num_rois; cudaError_t err; PSROIPoolForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, bottom_data, spatial_scale, height, width, channels, pooled_height, pooled_width, group_size, output_dim, bottom_rois, top_data, mapping_channel); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void PSROIPoolBackward(const int nthreads, const float* top_diff, const int* mapping_channel, const int num_rois, const float spatial_scale, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int output_dim, float* bottom_diff, const float* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = static_cast<float>(round(bottom_rois[1])) * spatial_scale; float roi_start_h = static_cast<float>(round(bottom_rois[2])) * spatial_scale; float roi_end_w = static_cast<float>(round(bottom_rois[3]) + 1.) * spatial_scale; float roi_end_h = static_cast<float>(round(bottom_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 float roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom float bin_size_h = roi_height / static_cast<float>(pooled_height); float bin_size_w = roi_width / static_cast<float>(pooled_width); int hstart = floor(static_cast<float>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<float>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<float>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<float>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; float* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; float bin_area = (hend - hstart)*(wend - wstart); float diff_val = is_empty ? 0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; //caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); atomicAdd(offset_bottom_diff + bottom_index, diff_val); } } } } int PSROIPoolBackwardLauncher(const float* top_diff, const int* mapping_channel, const int batch_size, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_width, const int pooled_height, const int output_dim, float* bottom_diff, const float* bottom_rois, cudaStream_t stream) { const int kThreadsPerBlock = 1024; //const int output_size = output_dim * height * width * channels; const int output_size = output_dim * pooled_height * pooled_width * num_rois; cudaError_t err; PSROIPoolBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, top_diff, mapping_channel, num_rois, spatial_scale, height, width, channels, pooled_height, pooled_width, output_dim, bottom_diff, bottom_rois); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
b2d571c85ed31cd8c3b8c2e869eee48a89c13308.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //====================================== // // // CPU //====================================== #include"stdafx.h" #include"Activation_DATA.hpp" #include"Activation_FUNC.hpp" #include"Activation_Base.h" #include"Activation_GPU.cuh" #include"Activation_LayerData_GPU.cuh" using namespace Gravisbell; using namespace Gravisbell::Layer::NeuralNetwork; #define BLOCK_SIZE (32) namespace { //=========================== // Leaky-ReLU //=========================== __global__ void cuda_func_activation_LeakyReLU(const F32* i_lpInputBuffer, F32* o_lpOutputBuffer, F32 i_alpha, U32 i_bufferSize) { const U32 inputNum = blockIdx.x * BLOCK_SIZE + threadIdx.x; if(inputNum >= i_bufferSize) // warp... return; o_lpOutputBuffer[inputNum] = i_lpInputBuffer[inputNum] * ((i_lpInputBuffer[inputNum]>0) + i_alpha * (i_lpInputBuffer[inputNum]<=0)); } __global__ void cuda_func_dactivation_LeakyReLU(const F32* i_lpOutputBuffer, const F32* i_lpDOutputBuffer, F32* o_lpOutputBuffer, F32 i_alpha, U32 i_bufferSize) { const U32 inputNum = blockIdx.x * BLOCK_SIZE + threadIdx.x; if(inputNum >= i_bufferSize) // warp... 
return; o_lpOutputBuffer[inputNum] = ((i_lpOutputBuffer[inputNum]>0) + i_alpha * (i_lpOutputBuffer[inputNum]<=0)) * i_lpDOutputBuffer[inputNum]; } } namespace Gravisbell { namespace Layer { namespace NeuralNetwork { /** */ Activation_GPU::Activation_GPU(Gravisbell::GUID guid, Activation_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager) : Activation_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1)) , layerData (i_layerData) /**< */ , inputBufferCount (0) /**< */ , outputBufferCount (0) /**< */ , cudnnHandle (NULL) , activDesc (NULL) , inputTensorDesc (NULL) , outputTensorDesc (NULL) { cudnnCreate(&cudnnHandle); cudnnCreateTensorDescriptor(&inputTensorDesc); cudnnCreateTensorDescriptor(&outputTensorDesc); cudnnCreateActivationDescriptor(&activDesc); } /** */ Activation_GPU::~Activation_GPU() { if(inputTensorDesc) cudnnDestroyTensorDescriptor(inputTensorDesc); if(outputTensorDesc) cudnnDestroyTensorDescriptor(outputTensorDesc); if(activDesc) cudnnDestroyActivationDescriptor(activDesc); if(cudnnHandle) cudnnDestroy(cudnnHandle); } //================================ // //================================ /** */ U32 Activation_GPU::GetLayerKind()const { return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase(); } /** . @return 0 */ ErrorCode Activation_GPU::Initialize(void) { return this->layerData.Initialize(); } //=========================== // //=========================== /** */ ILayerData& Activation_GPU::GetLayerData() { return this->layerData; } const ILayerData& Activation_GPU::GetLayerData()const { return this->layerData; } //================================ // //================================ /** .() @param batchSize . NN. PreProcessLearnLoop. 
*/ ErrorCode Activation_GPU::PreProcessLearn() { ErrorCode errorCode = this->PreProcessCalculate(); if(errorCode != ErrorCode::ERROR_CODE_NONE) return errorCode; return ErrorCode::ERROR_CODE_NONE; } /** .() @param batchSize . NN. Calculate. */ ErrorCode Activation_GPU::PreProcessCalculate() { // this->inputBufferCount = this->GetInputBufferCount(); if(this->inputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT; // this->outputBufferCount = this->GetOutputBufferCount(); if(this->outputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT; // switch(this->layerData.layerStructure.ActivationType) { // lenear case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_lenear: break; default: { int n = this->GetBatchSize(); int c = this->GetOutputDataStruct().ch; int h = this->GetOutputDataStruct().z * this->GetOutputDataStruct().y; int w = this->GetOutputDataStruct().x; const int nDims = 4; int dimA[nDims] = {n, c, h, w}; int strideA[nDims] = {c*h*w, h*w, w, 1}; cudnnStatus_t err = cudnnSetTensorNdDescriptor(this->outputTensorDesc, CUDNN_DATA_FLOAT, 4, dimA, strideA ); if(err != 0) break; err = cudnnSetTensorNdDescriptor(this->inputTensorDesc, CUDNN_DATA_FLOAT, 4, dimA, strideA ); if(err != 0) break; } break; } // switch(this->layerData.layerStructure.ActivationType) { // lenear case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_lenear: break; // Sigmoid case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid_crossEntropy: default: cudnnSetActivationDescriptor(activDesc, CUDNN_ACTIVATION_SIGMOID, CUDNN_PROPAGATE_NAN, 0.0); break; // ReLU case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_ReLU: cudnnSetActivationDescriptor(activDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0); break; // Leaky-ReLU case 
Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_LeakyReLU: break; // tanh case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_tanh: cudnnSetActivationDescriptor(activDesc, CUDNN_ACTIVATION_TANH, CUDNN_PROPAGATE_NAN, 0.0); break; // SoftMax case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH: break; case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL_crossEntropy: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH_crossEntropy: break; } return ErrorCode::ERROR_CODE_NONE; } /** . Calculate. */ ErrorCode Activation_GPU::PreProcessLoop() { return Gravisbell::ErrorCode::ERROR_CODE_NONE; } /** . @param lpInputBuffer . GetInputBufferCount @return 0 */ ErrorCode Activation_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lpInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer) { switch(this->layerData.layerStructure.ActivationType) { // lenear case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_lenear: hipMemcpy(o_lppOutputBuffer, i_lpInputBuffer, sizeof(F32)*this->inputBufferCount*this->GetBatchSize(), hipMemcpyDeviceToDevice); break; default: // Sigmoid case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid_crossEntropy: // ReLU case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_ReLU: // tanh case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_tanh: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnActivationForward( this->cudnnHandle, this->activDesc, &alpha, inputTensorDesc, i_lpInputBuffer, &beta, outputTensorDesc, &o_lppOutputBuffer[0]); } break; // Leaky-ReLU case 
Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_LeakyReLU: { U32 MAX_BUFFER_SIZE = 32768; U32 bufferSize = this->inputBufferCount * this->GetBatchSize(); U32 remainingSize = bufferSize; while(remainingSize > 0) { U32 bufferCount = min(remainingSize, MAX_BUFFER_SIZE); dim3 grid((bufferCount +(BLOCK_SIZE - 1))/BLOCK_SIZE , 1, 1); dim3 block(BLOCK_SIZE, 1, 1); U32 offset = bufferSize - remainingSize; hipLaunchKernelGGL(( cuda_func_activation_LeakyReLU), dim3(grid), dim3(block), 0, 0, &i_lpInputBuffer[offset], &o_lppOutputBuffer[offset], this->layerData.layerStructure.LeakyReLU_alpha, bufferCount); remainingSize = max(0, (S32)remainingSize-(S32)MAX_BUFFER_SIZE); } } break; // softmax case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL_crossEntropy: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnStatus_t err = cudnnSoftmaxForward( this->cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, this->inputTensorDesc, i_lpInputBuffer, &beta, this->outputTensorDesc, &o_lppOutputBuffer[0]); } break; case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH_crossEntropy: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnSoftmaxForward( this->cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, this->inputTensorDesc, i_lpInputBuffer, &beta, this->outputTensorDesc, &o_lppOutputBuffer[0]); } break; } #ifdef _DEBUG std::vector<F32> lpTmpInput(this->GetInputBufferCount() * this->GetBatchSize()); hipMemcpy(&lpTmpInput[0], i_lpInputBuffer, sizeof(float)*lpTmpInput.size(), hipMemcpyDeviceToHost); std::vector<F32> lpTmpOutput(this->GetOutputBufferCount() * this->GetBatchSize()); hipMemcpy(&lpTmpOutput[0], o_lppOutputBuffer, sizeof(float)*lpTmpOutput.size(), 
hipMemcpyDeviceToHost); #endif return ErrorCode::ERROR_CODE_NONE; } //================================ // //================================ /** .. Calculate. @param o_lppDInputBuffer . [GetBatchSize()][GetInputBufferCount()]. @param i_lppDOutputBuffer =. [GetBatchSize()][GetOutputBufferCount()] */ ErrorCode Activation_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { // if(o_lppDInputBuffer) { switch(this->layerData.layerStructure.ActivationType) { // lenear case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_lenear: hipMemcpy(o_lppDInputBuffer, i_lppDOutputBuffer, sizeof(F32)*this->inputBufferCount*this->GetBatchSize(), hipMemcpyDeviceToDevice); break; default: // Sigmoid case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid: // ReLU case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_ReLU: // tanh case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_tanh: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnActivationBackward( this->cudnnHandle, this->activDesc, &alpha, this->outputTensorDesc, // i_lppOutputBuffer, // this->outputTensorDesc, i_lppDOutputBuffer, // this->inputTensorDesc, i_lppInputBuffer, // &beta, this->inputTensorDesc, o_lppDInputBuffer // ); } break; // Leaky-ReLU case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_LeakyReLU: { U32 MAX_BUFFER_SIZE = 32768; U32 bufferSize = this->inputBufferCount * this->GetBatchSize(); U32 remainingSize = bufferSize; while(remainingSize > 0) { U32 bufferCount = min(remainingSize, MAX_BUFFER_SIZE); dim3 grid((bufferCount +(BLOCK_SIZE - 1))/BLOCK_SIZE , 1, 1); dim3 block(BLOCK_SIZE, 1, 1); U32 offset = bufferSize - remainingSize; hipLaunchKernelGGL(( cuda_func_dactivation_LeakyReLU), dim3(grid), dim3(block), 0, 0, 
&i_lppOutputBuffer[offset], &i_lppDOutputBuffer[offset], &o_lppDInputBuffer[offset], this->layerData.layerStructure.LeakyReLU_alpha, bufferCount); remainingSize = max(0, (S32)remainingSize-(S32)MAX_BUFFER_SIZE); } } break; // softmax case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnSoftmaxBackward( this->cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, this->outputTensorDesc, i_lppOutputBuffer, this->outputTensorDesc, i_lppDOutputBuffer, &beta, this->inputTensorDesc, o_lppDInputBuffer ); } break; case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnSoftmaxBackward( this->cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, this->outputTensorDesc, i_lppOutputBuffer, this->outputTensorDesc, i_lppDOutputBuffer, &beta, this->inputTensorDesc, o_lppDInputBuffer ); } break; case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid_crossEntropy: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL_crossEntropy: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH_crossEntropy: hipMemcpy(o_lppDInputBuffer, i_lppDOutputBuffer, sizeof(F32)*this->inputBufferCount*this->GetBatchSize(), hipMemcpyDeviceToDevice); break; } } #ifdef _DEBUG std::vector<F32> lpTmpInput(this->GetInputBufferCount() * this->GetBatchSize()); hipMemcpy(&lpTmpInput[0], i_lppInputBuffer, sizeof(float)*lpTmpInput.size(), hipMemcpyDeviceToHost); std::vector<F32> lpTmpOutput(this->GetOutputBufferCount() * this->GetBatchSize()); hipMemcpy(&lpTmpOutput[0], i_lppOutputBuffer, sizeof(float)*lpTmpOutput.size(), hipMemcpyDeviceToHost); std::vector<float> lpTmpDOutputBuffer(this->GetBatchSize() * this->outputBufferCount); hipMemcpy(&lpTmpDOutputBuffer[0], i_lppDOutputBuffer, 
sizeof(float)*lpTmpDOutputBuffer.size(), hipMemcpyDeviceToHost); std::vector<float> lpTmpDInputBuffer(this->GetBatchSize() * this->inputBufferCount); hipMemcpy(&lpTmpDInputBuffer[0], o_lppDInputBuffer, sizeof(float)*lpTmpDInputBuffer.size(), hipMemcpyDeviceToHost); #endif return ErrorCode::ERROR_CODE_NONE; } /** . Calculate. @param i_lppDOutputBuffer =. [GetBatchSize()][GetOutputBufferCount()]. */ ErrorCode Activation_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer); } } // Gravisbell; } // Layer; } // NeuralNetwork;
b2d571c85ed31cd8c3b8c2e869eee48a89c13308.cu
//====================================== // フィードフォワードニューラルネットワークの統合処理レイヤー // 結合、活性化 // CPU処理用 //====================================== #include"stdafx.h" #include"Activation_DATA.hpp" #include"Activation_FUNC.hpp" #include"Activation_Base.h" #include"Activation_GPU.cuh" #include"Activation_LayerData_GPU.cuh" using namespace Gravisbell; using namespace Gravisbell::Layer::NeuralNetwork; #define BLOCK_SIZE (32) namespace { //=========================== // Leaky-ReLU //=========================== __global__ void cuda_func_activation_LeakyReLU(const F32* i_lpInputBuffer, F32* o_lpOutputBuffer, F32 i_alpha, U32 i_bufferSize) { const U32 inputNum = blockIdx.x * BLOCK_SIZE + threadIdx.x; if(inputNum >= i_bufferSize) // 分岐するが末尾のwarpだけなので、処理速度に影響はないはず... return; o_lpOutputBuffer[inputNum] = i_lpInputBuffer[inputNum] * ((i_lpInputBuffer[inputNum]>0) + i_alpha * (i_lpInputBuffer[inputNum]<=0)); } __global__ void cuda_func_dactivation_LeakyReLU(const F32* i_lpOutputBuffer, const F32* i_lpDOutputBuffer, F32* o_lpOutputBuffer, F32 i_alpha, U32 i_bufferSize) { const U32 inputNum = blockIdx.x * BLOCK_SIZE + threadIdx.x; if(inputNum >= i_bufferSize) // 分岐するが末尾のwarpだけなので、処理速度に影響はないはず... 
return; o_lpOutputBuffer[inputNum] = ((i_lpOutputBuffer[inputNum]>0) + i_alpha * (i_lpOutputBuffer[inputNum]<=0)) * i_lpDOutputBuffer[inputNum]; } } namespace Gravisbell { namespace Layer { namespace NeuralNetwork { /** コンストラクタ */ Activation_GPU::Activation_GPU(Gravisbell::GUID guid, Activation_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager) : Activation_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1)) , layerData (i_layerData) /**< レイヤーデータ */ , inputBufferCount (0) /**< 入力バッファ数 */ , outputBufferCount (0) /**< 出力バッファ数 */ , cudnnHandle (NULL) , activDesc (NULL) , inputTensorDesc (NULL) , outputTensorDesc (NULL) { cudnnCreate(&cudnnHandle); cudnnCreateTensorDescriptor(&inputTensorDesc); cudnnCreateTensorDescriptor(&outputTensorDesc); cudnnCreateActivationDescriptor(&activDesc); } /** デストラクタ */ Activation_GPU::~Activation_GPU() { if(inputTensorDesc) cudnnDestroyTensorDescriptor(inputTensorDesc); if(outputTensorDesc) cudnnDestroyTensorDescriptor(outputTensorDesc); if(activDesc) cudnnDestroyActivationDescriptor(activDesc); if(cudnnHandle) cudnnDestroy(cudnnHandle); } //================================ // 基本処理 //================================ /** レイヤー種別の取得 */ U32 Activation_GPU::GetLayerKind()const { return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase(); } /** 初期化. 各ニューロンの値をランダムに初期化 @return 成功した場合0 */ ErrorCode Activation_GPU::Initialize(void) { return this->layerData.Initialize(); } //=========================== // レイヤーデータ関連 //=========================== /** レイヤーデータを取得する */ ILayerData& Activation_GPU::GetLayerData() { return this->layerData; } const ILayerData& Activation_GPU::GetLayerData()const { return this->layerData; } //================================ // 演算処理 //================================ /** 演算前処理を実行する.(学習用) @param batchSize 同時に演算を行うバッチのサイズ. NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない. 
失敗した場合はPreProcessLearnLoop以降の処理は実行不可. */ ErrorCode Activation_GPU::PreProcessLearn() { ErrorCode errorCode = this->PreProcessCalculate(); if(errorCode != ErrorCode::ERROR_CODE_NONE) return errorCode; return ErrorCode::ERROR_CODE_NONE; } /** 演算前処理を実行する.(演算用) @param batchSize 同時に演算を行うバッチのサイズ. NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない. 失敗した場合はCalculate以降の処理は実行不可. */ ErrorCode Activation_GPU::PreProcessCalculate() { // 入力バッファ数を確認 this->inputBufferCount = this->GetInputBufferCount(); if(this->inputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT; // 出力バッファ数を確認 this->outputBufferCount = this->GetOutputBufferCount(); if(this->outputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT; // 出力バッファを作成 switch(this->layerData.layerStructure.ActivationType) { // lenear case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_lenear: break; default: { int n = this->GetBatchSize(); int c = this->GetOutputDataStruct().ch; int h = this->GetOutputDataStruct().z * this->GetOutputDataStruct().y; int w = this->GetOutputDataStruct().x; const int nDims = 4; int dimA[nDims] = {n, c, h, w}; int strideA[nDims] = {c*h*w, h*w, w, 1}; cudnnStatus_t err = cudnnSetTensorNdDescriptor(this->outputTensorDesc, CUDNN_DATA_FLOAT, 4, dimA, strideA ); if(err != 0) break; err = cudnnSetTensorNdDescriptor(this->inputTensorDesc, CUDNN_DATA_FLOAT, 4, dimA, strideA ); if(err != 0) break; } break; } // 活性化関数を設定 switch(this->layerData.layerStructure.ActivationType) { // lenear case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_lenear: break; // Sigmoid case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid_crossEntropy: default: cudnnSetActivationDescriptor(activDesc, CUDNN_ACTIVATION_SIGMOID, CUDNN_PROPAGATE_NAN, 0.0); break; // ReLU case 
Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_ReLU: cudnnSetActivationDescriptor(activDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0); break; // Leaky-ReLU case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_LeakyReLU: break; // tanh case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_tanh: cudnnSetActivationDescriptor(activDesc, CUDNN_ACTIVATION_TANH, CUDNN_PROPAGATE_NAN, 0.0); break; // SoftMax系 case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH: break; case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL_crossEntropy: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH_crossEntropy: break; } return ErrorCode::ERROR_CODE_NONE; } /** ループの初期化処理.データセットの実行開始前に実行する 失敗した場合はCalculate以降の処理は実行不可. */ ErrorCode Activation_GPU::PreProcessLoop() { return Gravisbell::ErrorCode::ERROR_CODE_NONE; } /** 演算処理を実行する. @param lpInputBuffer 入力データバッファ. 
GetInputBufferCountで取得した値の要素数が必要 @return 成功した場合0が返る */ ErrorCode Activation_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lpInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer) { switch(this->layerData.layerStructure.ActivationType) { // lenear case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_lenear: cudaMemcpy(o_lppOutputBuffer, i_lpInputBuffer, sizeof(F32)*this->inputBufferCount*this->GetBatchSize(), cudaMemcpyDeviceToDevice); break; default: // Sigmoid case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid_crossEntropy: // ReLU case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_ReLU: // tanh case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_tanh: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnActivationForward( this->cudnnHandle, this->activDesc, &alpha, inputTensorDesc, i_lpInputBuffer, &beta, outputTensorDesc, &o_lppOutputBuffer[0]); } break; // Leaky-ReLU case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_LeakyReLU: { U32 MAX_BUFFER_SIZE = 32768; U32 bufferSize = this->inputBufferCount * this->GetBatchSize(); U32 remainingSize = bufferSize; while(remainingSize > 0) { U32 bufferCount = min(remainingSize, MAX_BUFFER_SIZE); dim3 grid((bufferCount +(BLOCK_SIZE - 1))/BLOCK_SIZE , 1, 1); dim3 block(BLOCK_SIZE, 1, 1); U32 offset = bufferSize - remainingSize; cuda_func_activation_LeakyReLU<<<grid, block>>>( &i_lpInputBuffer[offset], &o_lppOutputBuffer[offset], this->layerData.layerStructure.LeakyReLU_alpha, bufferCount); remainingSize = max(0, (S32)remainingSize-(S32)MAX_BUFFER_SIZE); } } break; // softmax case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL_crossEntropy: { F32 alpha = 
1.0f; F32 beta = 0.0f; cudnnStatus_t err = cudnnSoftmaxForward( this->cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, this->inputTensorDesc, i_lpInputBuffer, &beta, this->outputTensorDesc, &o_lppOutputBuffer[0]); } break; case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH_crossEntropy: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnSoftmaxForward( this->cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, this->inputTensorDesc, i_lpInputBuffer, &beta, this->outputTensorDesc, &o_lppOutputBuffer[0]); } break; } #ifdef _DEBUG std::vector<F32> lpTmpInput(this->GetInputBufferCount() * this->GetBatchSize()); cudaMemcpy(&lpTmpInput[0], i_lpInputBuffer, sizeof(float)*lpTmpInput.size(), cudaMemcpyDeviceToHost); std::vector<F32> lpTmpOutput(this->GetOutputBufferCount() * this->GetBatchSize()); cudaMemcpy(&lpTmpOutput[0], o_lppOutputBuffer, sizeof(float)*lpTmpOutput.size(), cudaMemcpyDeviceToHost); #endif return ErrorCode::ERROR_CODE_NONE; } //================================ // 学習処理 //================================ /** 入力誤差計算をを実行する.学習せずに入力誤差を取得したい場合に使用する. 入力信号、出力信号は直前のCalculateの値を参照する. @param o_lppDInputBuffer 入力誤差差分格納先レイヤー. [GetBatchSize()の戻り値][GetInputBufferCount()の戻り値]の要素数が必要. @param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. 
[GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要な配列 直前の計算結果を使用する */ ErrorCode Activation_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { // 入力誤差計算 if(o_lppDInputBuffer) { switch(this->layerData.layerStructure.ActivationType) { // lenear case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_lenear: cudaMemcpy(o_lppDInputBuffer, i_lppDOutputBuffer, sizeof(F32)*this->inputBufferCount*this->GetBatchSize(), cudaMemcpyDeviceToDevice); break; default: // Sigmoid case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid: // ReLU case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_ReLU: // tanh case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_tanh: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnActivationBackward( this->cudnnHandle, this->activDesc, &alpha, this->outputTensorDesc, // 出力データ構造 i_lppOutputBuffer, // 出力データ this->outputTensorDesc, i_lppDOutputBuffer, // 出力誤差 this->inputTensorDesc, i_lppInputBuffer, // 入力 &beta, this->inputTensorDesc, o_lppDInputBuffer // 入力誤差 ); } break; // Leaky-ReLU case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_LeakyReLU: { U32 MAX_BUFFER_SIZE = 32768; U32 bufferSize = this->inputBufferCount * this->GetBatchSize(); U32 remainingSize = bufferSize; while(remainingSize > 0) { U32 bufferCount = min(remainingSize, MAX_BUFFER_SIZE); dim3 grid((bufferCount +(BLOCK_SIZE - 1))/BLOCK_SIZE , 1, 1); dim3 block(BLOCK_SIZE, 1, 1); U32 offset = bufferSize - remainingSize; cuda_func_dactivation_LeakyReLU<<<grid, block>>>( &i_lppOutputBuffer[offset], &i_lppDOutputBuffer[offset], &o_lppDInputBuffer[offset], this->layerData.layerStructure.LeakyReLU_alpha, bufferCount); remainingSize = max(0, (S32)remainingSize-(S32)MAX_BUFFER_SIZE); } } break; // 
softmax case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnSoftmaxBackward( this->cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, this->outputTensorDesc, i_lppOutputBuffer, this->outputTensorDesc, i_lppDOutputBuffer, &beta, this->inputTensorDesc, o_lppDInputBuffer ); } break; case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH: { F32 alpha = 1.0f; F32 beta = 0.0f; cudnnSoftmaxBackward( this->cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, this->outputTensorDesc, i_lppOutputBuffer, this->outputTensorDesc, i_lppDOutputBuffer, &beta, this->inputTensorDesc, o_lppDInputBuffer ); } break; case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_sigmoid_crossEntropy: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_ALL_crossEntropy: case Gravisbell::Layer::NeuralNetwork::Activation::LayerStructure::ActivationType_softmax_CH_crossEntropy: cudaMemcpy(o_lppDInputBuffer, i_lppDOutputBuffer, sizeof(F32)*this->inputBufferCount*this->GetBatchSize(), cudaMemcpyDeviceToDevice); break; } } #ifdef _DEBUG std::vector<F32> lpTmpInput(this->GetInputBufferCount() * this->GetBatchSize()); cudaMemcpy(&lpTmpInput[0], i_lppInputBuffer, sizeof(float)*lpTmpInput.size(), cudaMemcpyDeviceToHost); std::vector<F32> lpTmpOutput(this->GetOutputBufferCount() * this->GetBatchSize()); cudaMemcpy(&lpTmpOutput[0], i_lppOutputBuffer, sizeof(float)*lpTmpOutput.size(), cudaMemcpyDeviceToHost); std::vector<float> lpTmpDOutputBuffer(this->GetBatchSize() * this->outputBufferCount); cudaMemcpy(&lpTmpDOutputBuffer[0], i_lppDOutputBuffer, sizeof(float)*lpTmpDOutputBuffer.size(), cudaMemcpyDeviceToHost); std::vector<float> lpTmpDInputBuffer(this->GetBatchSize() * this->inputBufferCount); cudaMemcpy(&lpTmpDInputBuffer[0], o_lppDInputBuffer, 
sizeof(float)*lpTmpDInputBuffer.size(), cudaMemcpyDeviceToHost); #endif return ErrorCode::ERROR_CODE_NONE; } /** 学習誤差を計算する. 入力信号、出力信号は直前のCalculateの値を参照する. @param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. [GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要. 直前の計算結果を使用する */ ErrorCode Activation_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer); } } // Gravisbell; } // Layer; } // NeuralNetwork;
659bc30b91f8034b9de61533f300fc41a52d3b3b.hip
// !!! This is a file automatically generated by hipify!!! #include "UpSamplePlugin.hpp" #include "plugin_utils.h" #include "spdlog/spdlog.h" #include "hip/hip_runtime.h" #include "hip/hip_fp16.h" #include <cassert> typedef unsigned char u_int8_t; const int NUM_THREADS = 512; const char* UPSAMPLE_PLUGIN_VERSION = "01"; const char* UPSAMPLE_PLUGIN_TYPE = "UpSamplePlugin"; const char* UPSAMPLE_PLUGIN_NAMESPACE = "_TRT"; const char* UPSAMPLE_PLUGIN_NAME = "UpSamplePlugin_TRT"; UpSamplePlugin::UpSamplePlugin(const float scale) : mScale{scale} { } UpSamplePlugin::UpSamplePlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mCHW); read(d, mDataType); read(d, mScale); read(d, mOutputWidth); read(d, mOutputHeight); read(d, mThreadCount); //std::cout << "read:" << a << " " << mOutputWidth<< " " <<mOutputHeight<<std::endl; assert(d == a + length); } UpSamplePlugin::~UpSamplePlugin() { } Dims UpSamplePlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { // spdlog::info("getOutputDimensions..."); // spdlog::info("input dimemsion: {},{},{}",inputs[0].d[0],inputs[0].d[1],inputs[0].d[2]); // spdlog::info("index:{}, nbInputDims:{}, mScale:{}",index,nbInputDims,mScale); // mCHW = inputs[0]; // mOutputHeight = (int)(inputs[0].d[1]* mScale); // mOutputWidth = (int)(inputs[0].d[2]* mScale); //std::cout << "ouputDims:" << mCHW.d[0] << " " << mOutputHeight << " " << mOutputWidth << std::endl; return Dims3(inputs[0].d[0], (int)(inputs[0].d[1]* mScale), (int)(inputs[0].d[2]* mScale)); } void UpSamplePlugin::configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) { //std::cout << "type " << int(type) << "format " << (int)format <<std::endl; assert((type == DataType::kFLOAT || type == DataType::kHALF || type == DataType::kINT8) && format == PluginFormat::kNCHW); mCHW = inputDims[0]; mOutputWidth = outputDims[0].d[1]; 
mOutputHeight = outputDims[0].d[2]; mDataType = type; //std::cout << "configureWithFormat:" <<inputDims[0].d[0]<< " " <<inputDims[0].d[1] << " "<<inputDims[0].d[2] <<std::endl; } int UpSamplePlugin::initialize() { int inputHeight = mCHW.d[1]; int inputWidth = mCHW.d[2]; mOutputHeight = (int)(inputHeight * mScale); mOutputWidth = (int)(inputWidth * mScale); return 0; } void UpSamplePlugin::terminate() { // WARNING: not implement? } void UpSamplePlugin::serialize(void* buffer) const{ char* d = static_cast<char*>(buffer), *a = d; write(d, mCHW); write(d, mDataType); write(d, mScale); write(d, mOutputWidth); write(d, mOutputHeight); write(d, mThreadCount); //std::cout << "write:" << a << " " << mOutputHeight<< " " <<mOutputWidth<<std::endl; assert(d == a + getSerializationSize()); } const char* UpSamplePlugin::getPluginType() const{ return UPSAMPLE_PLUGIN_TYPE; } const char* UpSamplePlugin::getPluginVersion() const{ return UPSAMPLE_PLUGIN_VERSION; } void UpSamplePlugin::destroy() { delete this; } IPluginV2* UpSamplePlugin::clone() const{ return new UpSamplePlugin(mScale); } void UpSamplePlugin::setPluginNamespace(const char* pluginNamespace) { } const char* UpSamplePlugin::getPluginNamespace() const{ return UPSAMPLE_PLUGIN_NAMESPACE; } // return UPSAMPLE_PLUGIN_TYPE + UPSAMPLE_PLUGIN_NAMESPACE const char* UpSamplePluginCreator::getPluginName() const { return UPSAMPLE_PLUGIN_NAME; } const char* UpSamplePluginCreator::getPluginVersion() const { return UPSAMPLE_PLUGIN_VERSION; } const PluginFieldCollection* UpSamplePluginCreator::getFieldNames() { return nullptr; } IPluginV2* UpSamplePluginCreator::createPlugin(const char *layerName, const PluginFieldCollection* fc) { return nullptr; } // deserialization plugin implementation IPluginV2* UpSamplePluginCreator::deserializePlugin(const char *layerName, const void *serialData, size_t serialLength) { std::string strName{layerName}; std::transform(strName.begin(), strName.end(), strName.begin(), ::tolower); if 
(strName.find("upsample") != std::string::npos) { return (IPluginV2*)(new UpSamplePlugin(serialData, serialLength)); } else { std::cout << "warning : " << layerName << std::endl; assert(0); return nullptr; } } void UpSamplePluginCreator::setPluginNamespace(const char* pluginNamespace) { // don't implement it } const char* UpSamplePluginCreator::getPluginNamespace() const { return UPSAMPLE_PLUGIN_NAMESPACE; } __device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w/scale_factor; z = z/scale_factor; d2 /= scale_factor; d3 /= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } template <typename Dtype> __global__ void upscale(const Dtype *input, Dtype *output, int no_elements, int scale_factor, int d1, int d2, int d3) { int ii = threadIdx.x + blockDim.x * blockIdx.x; if (ii >= no_elements) return; int ipidx = translate_idx(ii, d1, d2, d3, scale_factor); output[ii]=input[ipidx]; } template <typename Dtype> void UpSamplePlugin::forwardGpu(const Dtype* input,Dtype * output, int N,int C,int H ,int W) { int numElem = N*C*H*W;hipLaunchKernelGGL(( upscale), dim3((numElem + NUM_THREADS - 1) / NUM_THREADS), dim3(NUM_THREADS), 0, 0, input,output, numElem, mScale, C, H, W); } int UpSamplePlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream) { const int channels = mCHW.d[0]; const int64_t in_height = mCHW.d[1]; const int64_t in_width = mCHW.d[2]; const int64_t out_height = mOutputHeight; const int64_t out_width = mOutputWidth; int totalElems = batchSize * in_height * in_width * channels; // Handle no-op resizes efficiently. 
if (out_height == in_height && out_width == in_width) { CUDA_CHECK(hipMemcpyAsync(outputs[0], inputs[0], totalElems * type2size(mDataType), hipMemcpyDeviceToDevice, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); return 0; } //CUDA_CHECK(hipStreamSynchronize(stream)); switch (mDataType) { case DataType::kFLOAT : forwardGpu<float>((const float *)inputs[0],(float *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth); break; case DataType::kHALF: forwardGpu<__half>((const __half *)inputs[0],(__half *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth); break; case DataType::kINT8: forwardGpu<u_int8_t>((const u_int8_t *)inputs[0],(u_int8_t *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth); break; default: std::cerr << "error data type" << std::endl; } return 0; }; REGISTER_TENSORRT_PLUGIN(UpSamplePluginCreator);
659bc30b91f8034b9de61533f300fc41a52d3b3b.cu
#include "UpSamplePlugin.hpp" #include "plugin_utils.h" #include "spdlog/spdlog.h" #include "cuda_runtime.h" #include "cuda_fp16.h" #include <cassert> typedef unsigned char u_int8_t; const int NUM_THREADS = 512; const char* UPSAMPLE_PLUGIN_VERSION = "01"; const char* UPSAMPLE_PLUGIN_TYPE = "UpSamplePlugin"; const char* UPSAMPLE_PLUGIN_NAMESPACE = "_TRT"; const char* UPSAMPLE_PLUGIN_NAME = "UpSamplePlugin_TRT"; UpSamplePlugin::UpSamplePlugin(const float scale) : mScale{scale} { } UpSamplePlugin::UpSamplePlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mCHW); read(d, mDataType); read(d, mScale); read(d, mOutputWidth); read(d, mOutputHeight); read(d, mThreadCount); //std::cout << "read:" << a << " " << mOutputWidth<< " " <<mOutputHeight<<std::endl; assert(d == a + length); } UpSamplePlugin::~UpSamplePlugin() { } Dims UpSamplePlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { // spdlog::info("getOutputDimensions..."); // spdlog::info("input dimemsion: {},{},{}",inputs[0].d[0],inputs[0].d[1],inputs[0].d[2]); // spdlog::info("index:{}, nbInputDims:{}, mScale:{}",index,nbInputDims,mScale); // mCHW = inputs[0]; // mOutputHeight = (int)(inputs[0].d[1]* mScale); // mOutputWidth = (int)(inputs[0].d[2]* mScale); //std::cout << "ouputDims:" << mCHW.d[0] << " " << mOutputHeight << " " << mOutputWidth << std::endl; return Dims3(inputs[0].d[0], (int)(inputs[0].d[1]* mScale), (int)(inputs[0].d[2]* mScale)); } void UpSamplePlugin::configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) { //std::cout << "type " << int(type) << "format " << (int)format <<std::endl; assert((type == DataType::kFLOAT || type == DataType::kHALF || type == DataType::kINT8) && format == PluginFormat::kNCHW); mCHW = inputDims[0]; mOutputWidth = outputDims[0].d[1]; mOutputHeight = outputDims[0].d[2]; mDataType = type; 
//std::cout << "configureWithFormat:" <<inputDims[0].d[0]<< " " <<inputDims[0].d[1] << " "<<inputDims[0].d[2] <<std::endl; } int UpSamplePlugin::initialize() { int inputHeight = mCHW.d[1]; int inputWidth = mCHW.d[2]; mOutputHeight = (int)(inputHeight * mScale); mOutputWidth = (int)(inputWidth * mScale); return 0; } void UpSamplePlugin::terminate() { // WARNING: not implement? } void UpSamplePlugin::serialize(void* buffer) const{ char* d = static_cast<char*>(buffer), *a = d; write(d, mCHW); write(d, mDataType); write(d, mScale); write(d, mOutputWidth); write(d, mOutputHeight); write(d, mThreadCount); //std::cout << "write:" << a << " " << mOutputHeight<< " " <<mOutputWidth<<std::endl; assert(d == a + getSerializationSize()); } const char* UpSamplePlugin::getPluginType() const{ return UPSAMPLE_PLUGIN_TYPE; } const char* UpSamplePlugin::getPluginVersion() const{ return UPSAMPLE_PLUGIN_VERSION; } void UpSamplePlugin::destroy() { delete this; } IPluginV2* UpSamplePlugin::clone() const{ return new UpSamplePlugin(mScale); } void UpSamplePlugin::setPluginNamespace(const char* pluginNamespace) { } const char* UpSamplePlugin::getPluginNamespace() const{ return UPSAMPLE_PLUGIN_NAMESPACE; } // return UPSAMPLE_PLUGIN_TYPE + UPSAMPLE_PLUGIN_NAMESPACE const char* UpSamplePluginCreator::getPluginName() const { return UPSAMPLE_PLUGIN_NAME; } const char* UpSamplePluginCreator::getPluginVersion() const { return UPSAMPLE_PLUGIN_VERSION; } const PluginFieldCollection* UpSamplePluginCreator::getFieldNames() { return nullptr; } IPluginV2* UpSamplePluginCreator::createPlugin(const char *layerName, const PluginFieldCollection* fc) { return nullptr; } // deserialization plugin implementation IPluginV2* UpSamplePluginCreator::deserializePlugin(const char *layerName, const void *serialData, size_t serialLength) { std::string strName{layerName}; std::transform(strName.begin(), strName.end(), strName.begin(), ::tolower); if (strName.find("upsample") != std::string::npos) { return 
(IPluginV2*)(new UpSamplePlugin(serialData, serialLength)); } else { std::cout << "warning : " << layerName << std::endl; assert(0); return nullptr; } } void UpSamplePluginCreator::setPluginNamespace(const char* pluginNamespace) { // don't implement it } const char* UpSamplePluginCreator::getPluginNamespace() const { return UPSAMPLE_PLUGIN_NAMESPACE; } __device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w/scale_factor; z = z/scale_factor; d2 /= scale_factor; d3 /= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } template <typename Dtype> __global__ void upscale(const Dtype *input, Dtype *output, int no_elements, int scale_factor, int d1, int d2, int d3) { int ii = threadIdx.x + blockDim.x * blockIdx.x; if (ii >= no_elements) return; int ipidx = translate_idx(ii, d1, d2, d3, scale_factor); output[ii]=input[ipidx]; } template <typename Dtype> void UpSamplePlugin::forwardGpu(const Dtype* input,Dtype * output, int N,int C,int H ,int W) { int numElem = N*C*H*W; upscale<<<(numElem + NUM_THREADS - 1) / NUM_THREADS, NUM_THREADS>>>(input,output, numElem, mScale, C, H, W); } int UpSamplePlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream) { const int channels = mCHW.d[0]; const int64_t in_height = mCHW.d[1]; const int64_t in_width = mCHW.d[2]; const int64_t out_height = mOutputHeight; const int64_t out_width = mOutputWidth; int totalElems = batchSize * in_height * in_width * channels; // Handle no-op resizes efficiently. 
if (out_height == in_height && out_width == in_width) { CUDA_CHECK(cudaMemcpyAsync(outputs[0], inputs[0], totalElems * type2size(mDataType), cudaMemcpyDeviceToDevice, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); return 0; } //CUDA_CHECK(cudaStreamSynchronize(stream)); switch (mDataType) { case DataType::kFLOAT : forwardGpu<float>((const float *)inputs[0],(float *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth); break; case DataType::kHALF: forwardGpu<__half>((const __half *)inputs[0],(__half *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth); break; case DataType::kINT8: forwardGpu<u_int8_t>((const u_int8_t *)inputs[0],(u_int8_t *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth); break; default: std::cerr << "error data type" << std::endl; } return 0; }; REGISTER_TENSORRT_PLUGIN(UpSamplePluginCreator);
d92f79cd882f90ccabf41c6ca02daeb91658f4e4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <assert.h> #include <hip/hip_runtime.h> // #include "cutil.h" #include "mex.h" #define SPXL 8 #define SPXR 7 #define SPYU 8 #define SPYD 7 #define BLOCK_SIZE_Y 16 //SPYU+SPYD+1 #define BLOCK_SIZE_X 16 //SPYU+SPYD+1 #define MAX_CELL_SIZE 50 #define min(a, b) ((a) < (b) ? (a) : (b)) #define max(a, b) ((a) > (b) ? (a) : (b)) #define MAX_LEVELS 1000 int quickSort(float *, int, int *, int *); __global__ void NSSD(float *, float *, int , int , int *, int *, int *, int *, float *, int *, int *, float *, float *); int quickSort(float *arr, int elements, int *arr1, int *arr2) { float piv, piv1; int p1, p2; int beg[MAX_LEVELS], end[MAX_LEVELS], i=0, L, R ; beg[0]=0; end[0]=elements; while (i>=0) { L=beg[i]; R=end[i]-1; if (L<R) { piv=(arr[L]); piv1=arr[L]; p1=arr1[L]; p2=arr2[L]; if (i==MAX_LEVELS-1) return 0; while (L<R) { while ((arr[R])>=piv && L<R) R--; if (L<R) { arr[L]=arr[R]; arr1[L]=arr1[R]; arr2[L++]=arr2[R]; } while ((arr[L])<=piv && L<R) L++; if (L<R) { arr[R]=arr[L]; arr1[R]=arr1[L]; arr2[R--]=arr2[L]; } } arr[L]=piv1; arr1[L]=p1; arr2[L]=p2; beg[i+1]=L+1; end[i+1]=end[i]; end[i++]=L; } else i--; } return 1; } __global__ void NSSD(float *Im1, float *Im2, int IMSIZEX, int IMSIZEY, int *xmin, int *xmax, int *ymin, int *ymax, float *C, int *movedE, int *movedS, float *Sp, float *SpVal) { // Block index int bID = blockIdx.x; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty*BLOCK_SIZE_X+tx; int tSizeX, tSizeY, tStartX, tStartY; int x, y; int saStartX, saStartY, saEndX, saEndY, saSizeX, saSizeY; tStartX = xmin[bID]-1; tStartY = ymin[bID]-1; tSizeX = xmax[bID] - tStartX; tSizeY = ymax[bID] - tStartY; saStartX = max(0, tStartX - SPXL); saStartY = max(0, tStartY - SPYU); saEndX = min(IMSIZEX-1, xmax[bID]-1 + SPXR); saEndY = min(IMSIZEY-1, ymax[bID]-1 + SPYD); saSizeX = saEndX - saStartX +1; saSizeY = saEndY - saStartY +1; __shared__ char 
SP[MAX_CELL_SIZE][MAX_CELL_SIZE]; __shared__ float T[MAX_CELL_SIZE][MAX_CELL_SIZE]; __shared__ float meanT; __shared__ float SumToffsetSqrd ; meanT = 0.; SumToffsetSqrd = 0.; __shared__ float temp[BLOCK_SIZE_X*BLOCK_SIZE_Y]; __shared__ int tempi[BLOCK_SIZE_X*BLOCK_SIZE_Y]; float meanF = 0.; float SumFoffsetSqrd = 0.; float numerator = 0.; __shared__ int noOfOnPixels; __shared__ int noOfPixelsToWriteInCellX; __shared__ int noOfPixelsToWriteInCellY; //__shared__ int noOfPixelsToWriteInSAX; //__shared__ int noOfPixelsToWriteInSAY; noOfPixelsToWriteInCellX = int((1.*tSizeX/BLOCK_SIZE_X)+1); noOfPixelsToWriteInCellY = int((1.*tSizeY/BLOCK_SIZE_X)+1); //noOfPixelsToWriteInSAX = int((1.*saSizeX/BLOCK_SIZE_X)+1); //noOfPixelsToWriteInSAY = int((1.*saSizeY/BLOCK_SIZE_X)+1); tempi[tid] = 0; for (x = noOfPixelsToWriteInCellX*tx ; x<min(noOfPixelsToWriteInCellX*(tx+1), tSizeX); x++ ) for (y = noOfPixelsToWriteInCellY*ty ; y<min(noOfPixelsToWriteInCellY*(ty+1), tSizeY); y++ ) { T[x][y] = Im1[(tStartX +x)*IMSIZEY + tStartY + y]; SP[x][y] = char(Sp[(tStartX +x)*IMSIZEY + tStartY + y]== SpVal[bID]); tempi[tid]+=int(SP[x][y]); } for(int stride = BLOCK_SIZE_X*BLOCK_SIZE_Y / 2; stride > 0; stride >>= 1) { __syncthreads(); for(int iAccum = tid; iAccum < stride; iAccum += BLOCK_SIZE_X*BLOCK_SIZE_Y) tempi[iAccum] += tempi[stride + iAccum]; } noOfOnPixels = tempi[0]; __syncthreads(); temp[tid] = 0.; for (x = noOfPixelsToWriteInCellX*tx ; x<min(noOfPixelsToWriteInCellX*(tx+1), tSizeX); x++ ) for (y = noOfPixelsToWriteInCellY*ty ; y<min(noOfPixelsToWriteInCellY*(ty+1), tSizeY); y++ ) if (SP[x][y]) temp[tid] += T[x][y]/noOfOnPixels; for(int stride = BLOCK_SIZE_X*BLOCK_SIZE_Y / 2; stride > 0; stride >>= 1) { __syncthreads(); for(int iAccum = tid; iAccum < stride; iAccum += BLOCK_SIZE_X*BLOCK_SIZE_Y) temp[iAccum] += temp[stride + iAccum]; } meanT = temp[0]; __syncthreads(); temp[tid] =0.; for (x = noOfPixelsToWriteInCellX*tx ; x<min(noOfPixelsToWriteInCellX*(tx+1), tSizeX); x++ ) for (y = 
noOfPixelsToWriteInCellY*ty ; y<min(noOfPixelsToWriteInCellY*(ty+1), tSizeY); y++ ) if (SP[x][y]) temp[tid] += pow(T[x][y] - meanT,2); for(int stride = BLOCK_SIZE_X*BLOCK_SIZE_Y / 2; stride > 0; stride >>= 1) { __syncthreads(); for(int iAccum = tid; iAccum < stride; iAccum += BLOCK_SIZE_X*BLOCK_SIZE_Y) temp[iAccum] += temp[stride + iAccum]; } SumToffsetSqrd = temp[0]; __syncthreads(); //for (x = noOfPixelsToWriteInSAX*tx ; x<min(noOfPixelsToWriteInSAX*(tx+1), saSizeX); x++ ) // for (y = noOfPixelsToWriteInSAY*ty ; y<min(noOfPixelsToWriteInSAY*(ty+1), saSizeY); y++ ) { // SA[x][y] = Im2[(saStartX +x)*IMSIZEY + saStartY + y]; // } //__syncthreads(); // Final x,y positions in search area where each threads writes an element int sax2 = min(tx + tSizeX -1, saSizeX-1); int say2 = min(ty + tSizeY -1, saSizeY-1); meanF =0.; for (x=tx; x<=sax2 ; x++) for (y=ty; y<=say2; y++) if (SP[x-tx][y-ty]) meanF += Im2[(saStartX +x)*IMSIZEY + saStartY + y] / noOfOnPixels; __syncthreads(); SumFoffsetSqrd =0.; for (x=tx; x<=sax2 ; x++) for (y=ty; y<=say2; y++) if (SP[x-tx][y-ty]) SumFoffsetSqrd += pow(Im2[(saStartX +x)*IMSIZEY + saStartY + y] - meanF, 2); __syncthreads(); float denom = SumToffsetSqrd + SumFoffsetSqrd; for (x=tx; x<=sax2 ; x++) for (y=ty; y<=say2; y++) if (SP[x-tx][y-ty]) numerator += pow(T[x-tx][y-ty] - meanT - Im2[(saStartX +x)*IMSIZEY + saStartY + y] + meanF, 2); numerator = float(0.5) * numerator; __syncthreads(); C[bID * BLOCK_SIZE_X * BLOCK_SIZE_Y + BLOCK_SIZE_X * ty + tx] = -10.5 * (numerator/denom - 0.3) * (tx + tSizeX -1 < saSizeX) * (ty + tSizeY -1 < saSizeY); movedE[bID * BLOCK_SIZE_X * BLOCK_SIZE_Y + BLOCK_SIZE_X * ty + tx] = tx - SPXL; movedS[bID * BLOCK_SIZE_X * BLOCK_SIZE_Y + BLOCK_SIZE_X * ty + tx] = ty - SPYU; } void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { float *Im1f, *Im2f, *Sp2f, *Im1_d, *Im2_d, *Sp2_d, *Sp2Val_d; double *Im1, *Im2, *Sp2, *boundsX, *boundsY, *Sp2Val; double noHits; int i,j, pos, noPels, NC, M, N; int 
*xmin, *ymin, *xmax, *ymax, *xmin_d, *ymin_d, *xmax_d, *ymax_d; int *movedS, *movedE, *movedS_d, *movedE_d, *bestmovedS, *bestmovedE; float *maxScore_d, *maxScoref, *bestScores, *Sp2Valf; double *maxScoreRes, *movedSRes, *movedERes; //double N0, lambda; //int flag; //float *meanT, *meanTd; //flag = mxIsDouble(prhs[0]) ; /* Find the dimensions of the data */ M = mxGetM(prhs[0]); N = mxGetN(prhs[0]); NC = mxGetM(prhs[2]); /* Retrieve the input data */ Im1 = mxGetPr(prhs[0]); Im2 = mxGetPr(prhs[1]); boundsX = mxGetPr(prhs[2]); boundsY = mxGetPr(prhs[3]); Sp2 = mxGetPr(prhs[4]); Sp2Val = mxGetPr(prhs[5]); noHits = mxGetScalar(prhs[6]); //N0 = mxGetScalar(prhs[6]); //lambda = mxGetScalar(prhs[7]); noPels = M*N; /* Check if the input array is single or double precision */ Im1f = (float *) mxMalloc(noPels*sizeof(float)); for (j = 0; j < M*N; j++) { Im1f[j] = (float) Im1[j]; } Im2f = (float *) mxMalloc(noPels*sizeof(float)); for (j = 0; j < M*N; j++) { Im2f[j] = (float) Im2[j]; } Sp2f = (float *) mxMalloc(noPels*sizeof(float)); for (j = 0; j < M*N; j++) { Sp2f[j] = (float) Sp2[j]; } Sp2Valf = (float *) mxMalloc(NC*sizeof(float)); for (j = 0; j < NC; j++) { Sp2Valf[j] = (float) Sp2Val[j]; } xmin = (int *) mxMalloc(NC*sizeof(int)); for (j = 0; j < NC; j++) { xmin[j] = (int) boundsX[j]; } xmax = (int *) mxMalloc(NC*sizeof(int)); for (j = 0; j < NC; j++) { xmax[j] = (int) boundsX[NC+j]; } ymin = (int *) mxMalloc(NC*sizeof(int)); for (j = 0; j < NC; j++) { ymin[j] = (int) boundsY[j]; } ymax = (int *) mxMalloc(NC*sizeof(int)); for (j = 0; j < NC; j++) { ymax[j] = (int) boundsY[NC+j]; } maxScoref = (float *) mxMalloc(NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(float)); movedS = (int *) mxMalloc(NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int)); movedE = (int *) mxMalloc(NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int)); //meanT = (float *) malloc( BLOCK_SIZE_X*BLOCK_SIZE_Y*sizeof(float)); hipError_t error ; error = hipMalloc((void **) &Im1_d, noPels*sizeof(float)); error = hipMalloc((void **) 
&Im2_d, noPels*sizeof(float)); error = hipMalloc((void **) &Sp2_d, noPels*sizeof(float)); error = hipMalloc((void **) &Sp2Val_d, NC*sizeof(float)); error = hipMalloc((void **) &xmin_d, NC*sizeof(int)); error = hipMalloc((void **) &xmax_d, NC*sizeof(int)); error = hipMalloc((void **) &ymin_d, NC*sizeof(int)); error = hipMalloc((void **) &ymax_d, NC*sizeof(int)); error = hipMalloc((void **) &maxScore_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(float)); error = hipMalloc((void **) &movedS_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int)); error = hipMalloc((void **) &movedE_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int)); error = hipMemcpy(Im1_d, Im1f, noPels*sizeof(float), hipMemcpyHostToDevice); error = hipMemcpy(Im2_d, Im2f, noPels*sizeof(float), hipMemcpyHostToDevice); error = hipMemcpy(xmin_d, xmin, NC*sizeof(int), hipMemcpyHostToDevice); error = hipMemcpy(xmax_d, xmax, NC*sizeof(int), hipMemcpyHostToDevice); error = hipMemcpy(ymin_d, ymin, NC*sizeof(int), hipMemcpyHostToDevice); error = hipMemcpy(ymax_d, ymax, NC*sizeof(int), hipMemcpyHostToDevice); error = hipMemcpy(Sp2_d, Sp2f, noPels*sizeof(float), hipMemcpyHostToDevice); error = hipMemcpy(Sp2Val_d, Sp2Valf, NC*sizeof(float), hipMemcpyHostToDevice); dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 dimGrid(NC,1); printf("%d\n",sizeof(float)); printf("Starting GPU...\n"); hipLaunchKernelGGL(( NSSD), dim3(dimGrid), dim3(dimBlock), 0, 0, Im1_d, Im2_d, N, M, xmin_d, xmax_d, ymin_d, ymax_d, maxScore_d, movedS_d, movedE_d, Sp2_d, Sp2Val_d); printf("GPU Completed...\n"); hipMemcpy(maxScoref, maxScore_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(movedS, movedS_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(movedE, movedE_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int), hipMemcpyDeviceToHost); bestScores = (float *) mxMalloc(NC*noHits*sizeof(float)); bestmovedS = (int *) mxMalloc(NC*noHits*sizeof(int)); bestmovedE = (int *) mxMalloc(NC*noHits*sizeof(int)); 
for (j = 0; j < NC*(SPYU+SPYD+1)*(SPXL+SPXR+1); j+=(SPYU+SPYD+1)*(SPXL+SPXR+1)) { quickSort(&maxScoref[j],(SPYU+SPYD+1)*(SPXL+SPXR+1),&movedS[j],&movedE[j]); for (i = 0;i<noHits;i++) { pos = ( j/((SPYU+SPYD+1)*(SPXL+SPXR+1)) )*noHits+i; //sth wrong here bestScores[pos] = maxScoref[j+(SPYU+SPYD+1)*(SPXL+SPXR+1)-1-i]; bestmovedS[pos] = movedS[j+(SPYU+SPYD+1)*(SPXL+SPXR+1)-1-i]; bestmovedE[pos] = movedE[j+(SPYU+SPYD+1)*(SPXL+SPXR+1)-1-i]; } } /* Setup the output */ plhs[0] = mxCreateDoubleMatrix(noHits,NC,mxREAL); maxScoreRes = mxGetPr(plhs[0]); plhs[1] = mxCreateDoubleMatrix(noHits,NC,mxREAL); movedSRes = mxGetPr(plhs[1]); plhs[2] = mxCreateDoubleMatrix(noHits,NC,mxREAL); movedERes = mxGetPr(plhs[2]); for (j = 0; j < NC*noHits; j++) { maxScoreRes[j] = (double) bestScores[j]; } for (j = 0; j < NC*noHits; j++) { movedSRes[j] = (double) bestmovedS[j]; } for (j = 0; j < NC*noHits; j++) { movedERes[j] = (double) bestmovedE[j]; } hipFree(Im1_d); hipFree(Im2_d); hipFree(xmin_d); hipFree(xmax_d); hipFree(ymin_d); hipFree(ymax_d); hipFree(maxScore_d); hipFree(movedS_d); hipFree(movedE_d); hipFree(Sp2_d); hipFree(Sp2Val_d); mxFree(Im1f); mxFree(Im2f); mxFree(Sp2f); mxFree(Sp2Valf); mxFree(xmin); mxFree(xmax); mxFree(ymin); mxFree(ymax); mxFree(maxScoref); mxFree(movedS); mxFree(movedE); mxFree(bestScores); mxFree(bestmovedS); mxFree(bestmovedE); }
d92f79cd882f90ccabf41c6ca02daeb91658f4e4.cu
#include <stdio.h> #include <string.h> #include <assert.h> #include <cuda_runtime.h> // #include "cutil.h" #include "mex.h" #define SPXL 8 #define SPXR 7 #define SPYU 8 #define SPYD 7 #define BLOCK_SIZE_Y 16 //SPYU+SPYD+1 #define BLOCK_SIZE_X 16 //SPYU+SPYD+1 #define MAX_CELL_SIZE 50 #define min(a, b) ((a) < (b) ? (a) : (b)) #define max(a, b) ((a) > (b) ? (a) : (b)) #define MAX_LEVELS 1000 int quickSort(float *, int, int *, int *); __global__ void NSSD(float *, float *, int , int , int *, int *, int *, int *, float *, int *, int *, float *, float *); int quickSort(float *arr, int elements, int *arr1, int *arr2) { float piv, piv1; int p1, p2; int beg[MAX_LEVELS], end[MAX_LEVELS], i=0, L, R ; beg[0]=0; end[0]=elements; while (i>=0) { L=beg[i]; R=end[i]-1; if (L<R) { piv=(arr[L]); piv1=arr[L]; p1=arr1[L]; p2=arr2[L]; if (i==MAX_LEVELS-1) return 0; while (L<R) { while ((arr[R])>=piv && L<R) R--; if (L<R) { arr[L]=arr[R]; arr1[L]=arr1[R]; arr2[L++]=arr2[R]; } while ((arr[L])<=piv && L<R) L++; if (L<R) { arr[R]=arr[L]; arr1[R]=arr1[L]; arr2[R--]=arr2[L]; } } arr[L]=piv1; arr1[L]=p1; arr2[L]=p2; beg[i+1]=L+1; end[i+1]=end[i]; end[i++]=L; } else i--; } return 1; } __global__ void NSSD(float *Im1, float *Im2, int IMSIZEX, int IMSIZEY, int *xmin, int *xmax, int *ymin, int *ymax, float *C, int *movedE, int *movedS, float *Sp, float *SpVal) { // Block index int bID = blockIdx.x; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty*BLOCK_SIZE_X+tx; int tSizeX, tSizeY, tStartX, tStartY; int x, y; int saStartX, saStartY, saEndX, saEndY, saSizeX, saSizeY; tStartX = xmin[bID]-1; tStartY = ymin[bID]-1; tSizeX = xmax[bID] - tStartX; tSizeY = ymax[bID] - tStartY; saStartX = max(0, tStartX - SPXL); saStartY = max(0, tStartY - SPYU); saEndX = min(IMSIZEX-1, xmax[bID]-1 + SPXR); saEndY = min(IMSIZEY-1, ymax[bID]-1 + SPYD); saSizeX = saEndX - saStartX +1; saSizeY = saEndY - saStartY +1; __shared__ char SP[MAX_CELL_SIZE][MAX_CELL_SIZE]; __shared__ float 
T[MAX_CELL_SIZE][MAX_CELL_SIZE]; __shared__ float meanT; __shared__ float SumToffsetSqrd ; meanT = 0.; SumToffsetSqrd = 0.; __shared__ float temp[BLOCK_SIZE_X*BLOCK_SIZE_Y]; __shared__ int tempi[BLOCK_SIZE_X*BLOCK_SIZE_Y]; float meanF = 0.; float SumFoffsetSqrd = 0.; float numerator = 0.; __shared__ int noOfOnPixels; __shared__ int noOfPixelsToWriteInCellX; __shared__ int noOfPixelsToWriteInCellY; //__shared__ int noOfPixelsToWriteInSAX; //__shared__ int noOfPixelsToWriteInSAY; noOfPixelsToWriteInCellX = int((1.*tSizeX/BLOCK_SIZE_X)+1); noOfPixelsToWriteInCellY = int((1.*tSizeY/BLOCK_SIZE_X)+1); //noOfPixelsToWriteInSAX = int((1.*saSizeX/BLOCK_SIZE_X)+1); //noOfPixelsToWriteInSAY = int((1.*saSizeY/BLOCK_SIZE_X)+1); tempi[tid] = 0; for (x = noOfPixelsToWriteInCellX*tx ; x<min(noOfPixelsToWriteInCellX*(tx+1), tSizeX); x++ ) for (y = noOfPixelsToWriteInCellY*ty ; y<min(noOfPixelsToWriteInCellY*(ty+1), tSizeY); y++ ) { T[x][y] = Im1[(tStartX +x)*IMSIZEY + tStartY + y]; SP[x][y] = char(Sp[(tStartX +x)*IMSIZEY + tStartY + y]== SpVal[bID]); tempi[tid]+=int(SP[x][y]); } for(int stride = BLOCK_SIZE_X*BLOCK_SIZE_Y / 2; stride > 0; stride >>= 1) { __syncthreads(); for(int iAccum = tid; iAccum < stride; iAccum += BLOCK_SIZE_X*BLOCK_SIZE_Y) tempi[iAccum] += tempi[stride + iAccum]; } noOfOnPixels = tempi[0]; __syncthreads(); temp[tid] = 0.; for (x = noOfPixelsToWriteInCellX*tx ; x<min(noOfPixelsToWriteInCellX*(tx+1), tSizeX); x++ ) for (y = noOfPixelsToWriteInCellY*ty ; y<min(noOfPixelsToWriteInCellY*(ty+1), tSizeY); y++ ) if (SP[x][y]) temp[tid] += T[x][y]/noOfOnPixels; for(int stride = BLOCK_SIZE_X*BLOCK_SIZE_Y / 2; stride > 0; stride >>= 1) { __syncthreads(); for(int iAccum = tid; iAccum < stride; iAccum += BLOCK_SIZE_X*BLOCK_SIZE_Y) temp[iAccum] += temp[stride + iAccum]; } meanT = temp[0]; __syncthreads(); temp[tid] =0.; for (x = noOfPixelsToWriteInCellX*tx ; x<min(noOfPixelsToWriteInCellX*(tx+1), tSizeX); x++ ) for (y = noOfPixelsToWriteInCellY*ty ; 
y<min(noOfPixelsToWriteInCellY*(ty+1), tSizeY); y++ ) if (SP[x][y]) temp[tid] += pow(T[x][y] - meanT,2); for(int stride = BLOCK_SIZE_X*BLOCK_SIZE_Y / 2; stride > 0; stride >>= 1) { __syncthreads(); for(int iAccum = tid; iAccum < stride; iAccum += BLOCK_SIZE_X*BLOCK_SIZE_Y) temp[iAccum] += temp[stride + iAccum]; } SumToffsetSqrd = temp[0]; __syncthreads(); //for (x = noOfPixelsToWriteInSAX*tx ; x<min(noOfPixelsToWriteInSAX*(tx+1), saSizeX); x++ ) // for (y = noOfPixelsToWriteInSAY*ty ; y<min(noOfPixelsToWriteInSAY*(ty+1), saSizeY); y++ ) { // SA[x][y] = Im2[(saStartX +x)*IMSIZEY + saStartY + y]; // } //__syncthreads(); // Final x,y positions in search area where each threads writes an element int sax2 = min(tx + tSizeX -1, saSizeX-1); int say2 = min(ty + tSizeY -1, saSizeY-1); meanF =0.; for (x=tx; x<=sax2 ; x++) for (y=ty; y<=say2; y++) if (SP[x-tx][y-ty]) meanF += Im2[(saStartX +x)*IMSIZEY + saStartY + y] / noOfOnPixels; __syncthreads(); SumFoffsetSqrd =0.; for (x=tx; x<=sax2 ; x++) for (y=ty; y<=say2; y++) if (SP[x-tx][y-ty]) SumFoffsetSqrd += pow(Im2[(saStartX +x)*IMSIZEY + saStartY + y] - meanF, 2); __syncthreads(); float denom = SumToffsetSqrd + SumFoffsetSqrd; for (x=tx; x<=sax2 ; x++) for (y=ty; y<=say2; y++) if (SP[x-tx][y-ty]) numerator += pow(T[x-tx][y-ty] - meanT - Im2[(saStartX +x)*IMSIZEY + saStartY + y] + meanF, 2); numerator = float(0.5) * numerator; __syncthreads(); C[bID * BLOCK_SIZE_X * BLOCK_SIZE_Y + BLOCK_SIZE_X * ty + tx] = -10.5 * (numerator/denom - 0.3) * (tx + tSizeX -1 < saSizeX) * (ty + tSizeY -1 < saSizeY); movedE[bID * BLOCK_SIZE_X * BLOCK_SIZE_Y + BLOCK_SIZE_X * ty + tx] = tx - SPXL; movedS[bID * BLOCK_SIZE_X * BLOCK_SIZE_Y + BLOCK_SIZE_X * ty + tx] = ty - SPYU; } void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { float *Im1f, *Im2f, *Sp2f, *Im1_d, *Im2_d, *Sp2_d, *Sp2Val_d; double *Im1, *Im2, *Sp2, *boundsX, *boundsY, *Sp2Val; double noHits; int i,j, pos, noPels, NC, M, N; int *xmin, *ymin, *xmax, *ymax, 
*xmin_d, *ymin_d, *xmax_d, *ymax_d; int *movedS, *movedE, *movedS_d, *movedE_d, *bestmovedS, *bestmovedE; float *maxScore_d, *maxScoref, *bestScores, *Sp2Valf; double *maxScoreRes, *movedSRes, *movedERes; //double N0, lambda; //int flag; //float *meanT, *meanTd; //flag = mxIsDouble(prhs[0]) ; /* Find the dimensions of the data */ M = mxGetM(prhs[0]); N = mxGetN(prhs[0]); NC = mxGetM(prhs[2]); /* Retrieve the input data */ Im1 = mxGetPr(prhs[0]); Im2 = mxGetPr(prhs[1]); boundsX = mxGetPr(prhs[2]); boundsY = mxGetPr(prhs[3]); Sp2 = mxGetPr(prhs[4]); Sp2Val = mxGetPr(prhs[5]); noHits = mxGetScalar(prhs[6]); //N0 = mxGetScalar(prhs[6]); //lambda = mxGetScalar(prhs[7]); noPels = M*N; /* Check if the input array is single or double precision */ Im1f = (float *) mxMalloc(noPels*sizeof(float)); for (j = 0; j < M*N; j++) { Im1f[j] = (float) Im1[j]; } Im2f = (float *) mxMalloc(noPels*sizeof(float)); for (j = 0; j < M*N; j++) { Im2f[j] = (float) Im2[j]; } Sp2f = (float *) mxMalloc(noPels*sizeof(float)); for (j = 0; j < M*N; j++) { Sp2f[j] = (float) Sp2[j]; } Sp2Valf = (float *) mxMalloc(NC*sizeof(float)); for (j = 0; j < NC; j++) { Sp2Valf[j] = (float) Sp2Val[j]; } xmin = (int *) mxMalloc(NC*sizeof(int)); for (j = 0; j < NC; j++) { xmin[j] = (int) boundsX[j]; } xmax = (int *) mxMalloc(NC*sizeof(int)); for (j = 0; j < NC; j++) { xmax[j] = (int) boundsX[NC+j]; } ymin = (int *) mxMalloc(NC*sizeof(int)); for (j = 0; j < NC; j++) { ymin[j] = (int) boundsY[j]; } ymax = (int *) mxMalloc(NC*sizeof(int)); for (j = 0; j < NC; j++) { ymax[j] = (int) boundsY[NC+j]; } maxScoref = (float *) mxMalloc(NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(float)); movedS = (int *) mxMalloc(NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int)); movedE = (int *) mxMalloc(NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int)); //meanT = (float *) malloc( BLOCK_SIZE_X*BLOCK_SIZE_Y*sizeof(float)); cudaError_t error ; error = cudaMalloc((void **) &Im1_d, noPels*sizeof(float)); error = cudaMalloc((void **) &Im2_d, 
noPels*sizeof(float)); error = cudaMalloc((void **) &Sp2_d, noPels*sizeof(float)); error = cudaMalloc((void **) &Sp2Val_d, NC*sizeof(float)); error = cudaMalloc((void **) &xmin_d, NC*sizeof(int)); error = cudaMalloc((void **) &xmax_d, NC*sizeof(int)); error = cudaMalloc((void **) &ymin_d, NC*sizeof(int)); error = cudaMalloc((void **) &ymax_d, NC*sizeof(int)); error = cudaMalloc((void **) &maxScore_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(float)); error = cudaMalloc((void **) &movedS_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int)); error = cudaMalloc((void **) &movedE_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int)); error = cudaMemcpy(Im1_d, Im1f, noPels*sizeof(float), cudaMemcpyHostToDevice); error = cudaMemcpy(Im2_d, Im2f, noPels*sizeof(float), cudaMemcpyHostToDevice); error = cudaMemcpy(xmin_d, xmin, NC*sizeof(int), cudaMemcpyHostToDevice); error = cudaMemcpy(xmax_d, xmax, NC*sizeof(int), cudaMemcpyHostToDevice); error = cudaMemcpy(ymin_d, ymin, NC*sizeof(int), cudaMemcpyHostToDevice); error = cudaMemcpy(ymax_d, ymax, NC*sizeof(int), cudaMemcpyHostToDevice); error = cudaMemcpy(Sp2_d, Sp2f, noPels*sizeof(float), cudaMemcpyHostToDevice); error = cudaMemcpy(Sp2Val_d, Sp2Valf, NC*sizeof(float), cudaMemcpyHostToDevice); dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 dimGrid(NC,1); printf("%d\n",sizeof(float)); printf("Starting GPU...\n"); NSSD<<<dimGrid, dimBlock>>>(Im1_d, Im2_d, N, M, xmin_d, xmax_d, ymin_d, ymax_d, maxScore_d, movedS_d, movedE_d, Sp2_d, Sp2Val_d); printf("GPU Completed...\n"); cudaMemcpy(maxScoref, maxScore_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(movedS, movedS_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(movedE, movedE_d, NC*(SPYU+SPYD+1)*(SPXL+SPXR+1)*sizeof(int), cudaMemcpyDeviceToHost); bestScores = (float *) mxMalloc(NC*noHits*sizeof(float)); bestmovedS = (int *) mxMalloc(NC*noHits*sizeof(int)); bestmovedE = (int *) mxMalloc(NC*noHits*sizeof(int)); for (j = 0; j < 
NC*(SPYU+SPYD+1)*(SPXL+SPXR+1); j+=(SPYU+SPYD+1)*(SPXL+SPXR+1)) { quickSort(&maxScoref[j],(SPYU+SPYD+1)*(SPXL+SPXR+1),&movedS[j],&movedE[j]); for (i = 0;i<noHits;i++) { pos = ( j/((SPYU+SPYD+1)*(SPXL+SPXR+1)) )*noHits+i; //sth wrong here bestScores[pos] = maxScoref[j+(SPYU+SPYD+1)*(SPXL+SPXR+1)-1-i]; bestmovedS[pos] = movedS[j+(SPYU+SPYD+1)*(SPXL+SPXR+1)-1-i]; bestmovedE[pos] = movedE[j+(SPYU+SPYD+1)*(SPXL+SPXR+1)-1-i]; } } /* Setup the output */ plhs[0] = mxCreateDoubleMatrix(noHits,NC,mxREAL); maxScoreRes = mxGetPr(plhs[0]); plhs[1] = mxCreateDoubleMatrix(noHits,NC,mxREAL); movedSRes = mxGetPr(plhs[1]); plhs[2] = mxCreateDoubleMatrix(noHits,NC,mxREAL); movedERes = mxGetPr(plhs[2]); for (j = 0; j < NC*noHits; j++) { maxScoreRes[j] = (double) bestScores[j]; } for (j = 0; j < NC*noHits; j++) { movedSRes[j] = (double) bestmovedS[j]; } for (j = 0; j < NC*noHits; j++) { movedERes[j] = (double) bestmovedE[j]; } cudaFree(Im1_d); cudaFree(Im2_d); cudaFree(xmin_d); cudaFree(xmax_d); cudaFree(ymin_d); cudaFree(ymax_d); cudaFree(maxScore_d); cudaFree(movedS_d); cudaFree(movedE_d); cudaFree(Sp2_d); cudaFree(Sp2Val_d); mxFree(Im1f); mxFree(Im2f); mxFree(Sp2f); mxFree(Sp2Valf); mxFree(xmin); mxFree(xmax); mxFree(ymin); mxFree(ymax); mxFree(maxScoref); mxFree(movedS); mxFree(movedE); mxFree(bestScores); mxFree(bestmovedS); mxFree(bestmovedE); }
d867f339ca8ed535b2154ac72a80c0aa2ac036fb.hip
// !!! This is a file automatically generated by hipify!!! #include <glm/vec3.hpp> #include "glad/glad.h" #include "glm/gtc/type_ptr.hpp" #include <cuda_gl_interop.h> #include <memory> #include <chrono> #include <random> #include "planet.hpp" #include "simulation.h" #include "common.hpp" #include "util.hpp" using namespace std; using namespace glm; using namespace agp; GLuint g_default_vao = 0; glm::vec3 camera; unsigned int shaderProgram = 0; int NUM_SILICATE_PARTICLES = 6000; int NUM_IRON_PARTICLES = 9000; //std::unique_ptr<GPUSimulation> simulator; const int num_particles = 2*(NUM_SILICATE_PARTICLES + NUM_IRON_PARTICLES); GPUSimulation simulator; cudaGraphicsResource_t ssbo_handle; // This will identify our vertex buffer GLuint vertexbuffer; GLuint materialBuffer; GLuint IndexVBOID; GLuint ssbo_pos; //Particle_vec4 part_array[num_particles]; GLfloat triangle[] = {0.0f, 150.0f, 0.0f, -150.0f, -150.0f, 0.0f, 150.0f, -150.0f, 0.0f}; void init() { //simulator = std::make_unique<GPUSimulation>(); simulator.init(); // Generate and bind the default VAO glGenVertexArrays(1, &g_default_vao); glBindVertexArray(g_default_vao); // Set the background color (RGBA) glClearColor(0.0f, 0.0f, 0.0f, 0.5f); // Your OpenGL settings, such as alpha, depth and others, should be // defined here! For the assignment, we only ask you to enable the // alpha channel. 
glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); shaderProgram = util::loadShaders("vt.glsl", "fg.glsl"); glUseProgram(shaderProgram); camera = glm::vec3(0.0, 0.0, 50000.0); glm::mat4 model, view, projection; model = glm::mat4(1.0f); view = glm::lookAt(camera, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); projection = glm::perspective(glm::radians(60.0f), (float)1280 / (float)720, 0.1f, 100000.0f); unsigned int modelLoc = glGetUniformLocation(shaderProgram, "model"); unsigned int viewLoc = glGetUniformLocation(shaderProgram, "view"); unsigned int projectionLoc = glGetUniformLocation(shaderProgram, "projection"); glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model)); glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view)); glUniformMatrix4fv(projectionLoc, 1, GL_FALSE, glm::value_ptr(projection)); glGenBuffers(1, &ssbo_pos); glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo_pos); glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(Particle)*simulator.all.size(), &simulator.all[0], GL_DYNAMIC_DRAW); glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); // unbind hipGraphicsGLRegisterBuffer(&ssbo_handle, ssbo_pos, hipGraphicsRegisterFlagsNone); // Generate 1 buffer, put the resulting identifier in vertexbuffer glGenBuffers(1, &vertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer); // Give our vertices to OpenGL. glBufferData(GL_ARRAY_BUFFER, sizeof(triangle), &triangle, GL_DYNAMIC_DRAW); ushort pindices[3]; pindices[0] = 0; pindices[1] = 1; pindices[2] = 2; glGenBuffers(1, &IndexVBOID); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IndexVBOID); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ushort)*3, pindices, GL_DYNAMIC_DRAW); } void release() { // Release the default VAO glDeleteVertexArrays(1, &g_default_vao); // Do not forget to release any memory allocation here! 
simulator.release(); } void display() { simulator.update(ssbo_handle); // Clear the screen glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //printf("FreeGLUT triggered the display() callback!\n"); // Your rendering code must be here! Do not forget to swap the // front and back buffers, and to force a redisplay to keep the // render loop running. This functionality is available withinglm::vec3 // FreeGLUT as well, check the assignment for more information. GLint color_location = glGetUniformLocation(shaderProgram, "myColor"); glUniform4fv(color_location, 1, simulator.planets[0]->getSilicateColor()); color_location = glGetUniformLocation(shaderProgram, "myColor"); glEnableVertexAttribArray(0); glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer); glEnableClientState(GL_VERTEX_ARRAY); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0); //For glDrawElements // Draw the triangle ! // glDrawArrays(GL_TRIANGLES, 0, 3); // Starting from vertex 0; 3 vertices total -> 1 triangle glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, ssbo_pos); timeval a, b; gettimeofday(&a, NULL); glDrawElementsInstanced(GL_TRIANGLES, 3, GL_UNSIGNED_SHORT, NULL, num_particles); gettimeofday(&b, NULL); std::cout << "OpenGL Rendering: " << getElapsed(a, b) << "s" << std::endl; glDisableVertexAttribArray(0); // for(auto &particle : simulator.all) { // //std::cout << particle.position.x << "," << particle.position.y << "," << particle.position.z << std::endl; // /* if it is iron */ // glm::mat4 mod(1.0f); // glm::mat4 model(1.0f); // model = glm::translate(model, particle.position); // if(particle.material) { // glUniform4fv(color_location, 1, simulator.planets[0]->getIronColor()); // } // else { // glUniform4fv(color_location, 1, simulator.planets[0]->getSilicateColor()); // } // // unsigned int modelLoc = glGetUniformLocation(shaderProgram, "model"); // glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model)); // glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); // glutSolidSphere(150,10,10); // 
// glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); // glutSolidSphere(150,10,10); // // } // sleep(5); // for(int i=0; i<2; i++) { // GLint color_location = glGetUniformLocation(shaderProgram, "myColor"); // glUniform4fv(color_location, 1, simulator.planets[0]->getSilicateColor()); // // for(auto &particle : simulator.planets[i]->getSilicateParticles()) { // std::cout << particle.position.x << "," << particle.position.y << "," << particle.position.z << std::endl; // glm::mat4 mod(1.0f); // glm::mat4 model(1.0f); // model = glm::translate(model, particle.position); // unsigned int modelLoc = glGetUniformLocation(shaderProgram, "model"); // glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model)); // glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); // glutSolidSphere(150,10,10); // // glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); // glutSolidSphere(150,10,10); // } // // color_location = glGetUniformLocation(shaderProgram, "myColor"); // glUniform4fv(color_location, 1, simulator.planets[0]->getIronColor()); // // for(auto &particle : simulator.planets[i]->getIronParticles()) { // glm::mat4 mod(1.0f); // glm::mat4 model(1.0f); // model = glm::translate(model, particle.position); // unsigned int modelLoc = glGetUniformLocation(shaderProgram, "model"); // glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model)); // glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); // glutSolidSphere(150,10,10); // // glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); // glutSolidSphere(150,10,10); // } // } // Important note: The following function flushes the rendering // queue, but this is only for single-buffered rendering. You // must replace this function following the previous indications. 
//glFlush(); glutSwapBuffers(); glutPostRedisplay(); } void processKeys(unsigned char key, int x, int y) { printf("%d ", key); switch(key) { case 27: throw "exit"; break; case '-': { camera.z--; glm::mat4 view = glm::lookAt(camera, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); unsigned int viewLoc = glGetUniformLocation(shaderProgram, "view"); glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view)); } break; case '+': { camera.z++; glm::mat4 view = glm::lookAt(camera, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); unsigned int viewLoc = glGetUniformLocation(shaderProgram, "view"); glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view)); } break; } } void processSpecialKeys(int key, int x, int y) { switch(key) { case GLUT_KEY_LEFT: { //camera = glm::rotate(camera, glm::radians(-2.0f), glm::vec3(0.0, 1.0, 0.0)); glm::mat4 rotationMat(1); rotationMat = glm::rotate(rotationMat, 1.0f, glm::vec3(0.0, 1.0, 0.0)); //vec = glm::vec3(rotationMat * glm::vec4(vec, 1.0)); glm::vec4 newCamera(camera.x, camera.y, camera.z, 1.0); newCamera = rotationMat * newCamera; //camera = rotationMat * camera; camera = newCamera; glm::mat4 view = glm::lookAt(camera, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); unsigned int viewLoc = glGetUniformLocation(shaderProgram, "view"); glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view)); } break; case GLUT_KEY_RIGHT: { //camera = glm::rotate(camera, glm::radians(-2.0f), glm::vec3(0.0, 1.0, 0.0)); glm::mat4 rotationMat(1); rotationMat = glm::rotate(rotationMat, -1.0f, glm::vec3(0.0, 1.0, 0.0)); //vec = glm::vec3(rotationMat * glm::vec4(vec, 1.0)); glm::vec4 newCamera(camera.x, camera.y, camera.z, 1.0); newCamera = rotationMat * newCamera; //camera = rotationMat * camera; camera = newCamera; glm::mat4 view = glm::lookAt(camera, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); unsigned int viewLoc = glGetUniformLocation(shaderProgram, "view"); glUniformMatrix4fv(viewLoc, 1, 
GL_FALSE, glm::value_ptr(view)); } break; } } int main(int argc, char **argv) { if(argc==3) { NUM_IRON_PARTICLES = std::atoi(argv[1]); NUM_SILICATE_PARTICLES = std::atoi(argv[2]); } // Initialize FreeGLUT and create the window glutInit(&argc, argv); // Setup the window (e.g., size, display mode and so on) glutInitWindowSize(1280, 720); glutInitWindowPosition(100, 100); glutInitDisplayMode(GLUT_RGBA); // Make FreeGLUT return from the main rendering loop glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_GLUTMAINLOOP_RETURNS); // Create the window and associate the callbacks glutCreateWindow("Applied GPU Programming"); glutDisplayFunc(display); // glutIdleFunc( ... ); // glutReshapeFunc( ... ); glutKeyboardFunc(processKeys); glutSpecialFunc(processSpecialKeys); // glutMouseFunc( ... ); // glutMotionFunc( ... ); // Init GLAD to be able to access the OpenGL API if (!gladLoadGL()) { return GL_INVALID_OPERATION; } // Display OpenGL information util::displayOpenGLInfo(); // Initialize the 3D view init(); // Launch the main loop for rendering glutMainLoop(); // Release all the allocated memory release(); return 0; }
d867f339ca8ed535b2154ac72a80c0aa2ac036fb.cu
#include <glm/vec3.hpp> #include "glad/glad.h" #include "glm/gtc/type_ptr.hpp" #include <cuda_gl_interop.h> #include <memory> #include <chrono> #include <random> #include "planet.hpp" #include "simulation.h" #include "common.hpp" #include "util.hpp" using namespace std; using namespace glm; using namespace agp; GLuint g_default_vao = 0; glm::vec3 camera; unsigned int shaderProgram = 0; int NUM_SILICATE_PARTICLES = 6000; int NUM_IRON_PARTICLES = 9000; //std::unique_ptr<GPUSimulation> simulator; const int num_particles = 2*(NUM_SILICATE_PARTICLES + NUM_IRON_PARTICLES); GPUSimulation simulator; cudaGraphicsResource_t ssbo_handle; // This will identify our vertex buffer GLuint vertexbuffer; GLuint materialBuffer; GLuint IndexVBOID; GLuint ssbo_pos; //Particle_vec4 part_array[num_particles]; GLfloat triangle[] = {0.0f, 150.0f, 0.0f, -150.0f, -150.0f, 0.0f, 150.0f, -150.0f, 0.0f}; void init() { //simulator = std::make_unique<GPUSimulation>(); simulator.init(); // Generate and bind the default VAO glGenVertexArrays(1, &g_default_vao); glBindVertexArray(g_default_vao); // Set the background color (RGBA) glClearColor(0.0f, 0.0f, 0.0f, 0.5f); // Your OpenGL settings, such as alpha, depth and others, should be // defined here! For the assignment, we only ask you to enable the // alpha channel. 
glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); shaderProgram = util::loadShaders("vt.glsl", "fg.glsl"); glUseProgram(shaderProgram); camera = glm::vec3(0.0, 0.0, 50000.0); glm::mat4 model, view, projection; model = glm::mat4(1.0f); view = glm::lookAt(camera, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); projection = glm::perspective(glm::radians(60.0f), (float)1280 / (float)720, 0.1f, 100000.0f); unsigned int modelLoc = glGetUniformLocation(shaderProgram, "model"); unsigned int viewLoc = glGetUniformLocation(shaderProgram, "view"); unsigned int projectionLoc = glGetUniformLocation(shaderProgram, "projection"); glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model)); glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view)); glUniformMatrix4fv(projectionLoc, 1, GL_FALSE, glm::value_ptr(projection)); glGenBuffers(1, &ssbo_pos); glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo_pos); glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(Particle)*simulator.all.size(), &simulator.all[0], GL_DYNAMIC_DRAW); glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); // unbind cudaGraphicsGLRegisterBuffer(&ssbo_handle, ssbo_pos, cudaGraphicsRegisterFlagsNone); // Generate 1 buffer, put the resulting identifier in vertexbuffer glGenBuffers(1, &vertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer); // Give our vertices to OpenGL. glBufferData(GL_ARRAY_BUFFER, sizeof(triangle), &triangle, GL_DYNAMIC_DRAW); ushort pindices[3]; pindices[0] = 0; pindices[1] = 1; pindices[2] = 2; glGenBuffers(1, &IndexVBOID); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IndexVBOID); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ushort)*3, pindices, GL_DYNAMIC_DRAW); } void release() { // Release the default VAO glDeleteVertexArrays(1, &g_default_vao); // Do not forget to release any memory allocation here! 
simulator.release(); } void display() { simulator.update(ssbo_handle); // Clear the screen glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //printf("FreeGLUT triggered the display() callback!\n"); // Your rendering code must be here! Do not forget to swap the // front and back buffers, and to force a redisplay to keep the // render loop running. This functionality is available withinglm::vec3 // FreeGLUT as well, check the assignment for more information. GLint color_location = glGetUniformLocation(shaderProgram, "myColor"); glUniform4fv(color_location, 1, simulator.planets[0]->getSilicateColor()); color_location = glGetUniformLocation(shaderProgram, "myColor"); glEnableVertexAttribArray(0); glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer); glEnableClientState(GL_VERTEX_ARRAY); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0); //For glDrawElements // Draw the triangle ! // glDrawArrays(GL_TRIANGLES, 0, 3); // Starting from vertex 0; 3 vertices total -> 1 triangle glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, ssbo_pos); timeval a, b; gettimeofday(&a, NULL); glDrawElementsInstanced(GL_TRIANGLES, 3, GL_UNSIGNED_SHORT, NULL, num_particles); gettimeofday(&b, NULL); std::cout << "OpenGL Rendering: " << getElapsed(a, b) << "s" << std::endl; glDisableVertexAttribArray(0); // for(auto &particle : simulator.all) { // //std::cout << particle.position.x << "," << particle.position.y << "," << particle.position.z << std::endl; // /* if it is iron */ // glm::mat4 mod(1.0f); // glm::mat4 model(1.0f); // model = glm::translate(model, particle.position); // if(particle.material) { // glUniform4fv(color_location, 1, simulator.planets[0]->getIronColor()); // } // else { // glUniform4fv(color_location, 1, simulator.planets[0]->getSilicateColor()); // } // // unsigned int modelLoc = glGetUniformLocation(shaderProgram, "model"); // glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model)); // glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); // glutSolidSphere(150,10,10); // 
// glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); // glutSolidSphere(150,10,10); // // } // sleep(5); // for(int i=0; i<2; i++) { // GLint color_location = glGetUniformLocation(shaderProgram, "myColor"); // glUniform4fv(color_location, 1, simulator.planets[0]->getSilicateColor()); // // for(auto &particle : simulator.planets[i]->getSilicateParticles()) { // std::cout << particle.position.x << "," << particle.position.y << "," << particle.position.z << std::endl; // glm::mat4 mod(1.0f); // glm::mat4 model(1.0f); // model = glm::translate(model, particle.position); // unsigned int modelLoc = glGetUniformLocation(shaderProgram, "model"); // glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model)); // glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); // glutSolidSphere(150,10,10); // // glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); // glutSolidSphere(150,10,10); // } // // color_location = glGetUniformLocation(shaderProgram, "myColor"); // glUniform4fv(color_location, 1, simulator.planets[0]->getIronColor()); // // for(auto &particle : simulator.planets[i]->getIronParticles()) { // glm::mat4 mod(1.0f); // glm::mat4 model(1.0f); // model = glm::translate(model, particle.position); // unsigned int modelLoc = glGetUniformLocation(shaderProgram, "model"); // glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model)); // glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); // glutSolidSphere(150,10,10); // // glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); // glutSolidSphere(150,10,10); // } // } // Important note: The following function flushes the rendering // queue, but this is only for single-buffered rendering. You // must replace this function following the previous indications. 
//glFlush(); glutSwapBuffers(); glutPostRedisplay(); } void processKeys(unsigned char key, int x, int y) { printf("%d ", key); switch(key) { case 27: throw "exit"; break; case '-': { camera.z--; glm::mat4 view = glm::lookAt(camera, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); unsigned int viewLoc = glGetUniformLocation(shaderProgram, "view"); glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view)); } break; case '+': { camera.z++; glm::mat4 view = glm::lookAt(camera, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); unsigned int viewLoc = glGetUniformLocation(shaderProgram, "view"); glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view)); } break; } } void processSpecialKeys(int key, int x, int y) { switch(key) { case GLUT_KEY_LEFT: { //camera = glm::rotate(camera, glm::radians(-2.0f), glm::vec3(0.0, 1.0, 0.0)); glm::mat4 rotationMat(1); rotationMat = glm::rotate(rotationMat, 1.0f, glm::vec3(0.0, 1.0, 0.0)); //vec = glm::vec3(rotationMat * glm::vec4(vec, 1.0)); glm::vec4 newCamera(camera.x, camera.y, camera.z, 1.0); newCamera = rotationMat * newCamera; //camera = rotationMat * camera; camera = newCamera; glm::mat4 view = glm::lookAt(camera, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); unsigned int viewLoc = glGetUniformLocation(shaderProgram, "view"); glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view)); } break; case GLUT_KEY_RIGHT: { //camera = glm::rotate(camera, glm::radians(-2.0f), glm::vec3(0.0, 1.0, 0.0)); glm::mat4 rotationMat(1); rotationMat = glm::rotate(rotationMat, -1.0f, glm::vec3(0.0, 1.0, 0.0)); //vec = glm::vec3(rotationMat * glm::vec4(vec, 1.0)); glm::vec4 newCamera(camera.x, camera.y, camera.z, 1.0); newCamera = rotationMat * newCamera; //camera = rotationMat * camera; camera = newCamera; glm::mat4 view = glm::lookAt(camera, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); unsigned int viewLoc = glGetUniformLocation(shaderProgram, "view"); glUniformMatrix4fv(viewLoc, 1, 
GL_FALSE, glm::value_ptr(view)); } break; } } int main(int argc, char **argv) { if(argc==3) { NUM_IRON_PARTICLES = std::atoi(argv[1]); NUM_SILICATE_PARTICLES = std::atoi(argv[2]); } // Initialize FreeGLUT and create the window glutInit(&argc, argv); // Setup the window (e.g., size, display mode and so on) glutInitWindowSize(1280, 720); glutInitWindowPosition(100, 100); glutInitDisplayMode(GLUT_RGBA); // Make FreeGLUT return from the main rendering loop glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_GLUTMAINLOOP_RETURNS); // Create the window and associate the callbacks glutCreateWindow("Applied GPU Programming"); glutDisplayFunc(display); // glutIdleFunc( ... ); // glutReshapeFunc( ... ); glutKeyboardFunc(processKeys); glutSpecialFunc(processSpecialKeys); // glutMouseFunc( ... ); // glutMotionFunc( ... ); // Init GLAD to be able to access the OpenGL API if (!gladLoadGL()) { return GL_INVALID_OPERATION; } // Display OpenGL information util::displayOpenGLInfo(); // Initialize the 3D view init(); // Launch the main loop for rendering glutMainLoop(); // Release all the allocated memory release(); return 0; }
2649babb9cfda11e873c662350b6ca241ec4c71e.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2016, National University of Defense Technology // Authors: Xuhao Chen <cxh@illinois.edu> #define SCC_VARIANT "bitset" #include "scc.h" #include "bitset.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include <thrust/reduce.h> #include <thrust/execution_policy.h> #include "timer.h" #define debug 0 void SCCSolver(int m, int nnz, int *in_row_offsets, int *in_column_indices, int *out_row_offsets, int *out_column_indices, int *h_scc_root) { print_device_info(0); Timer t; int iter = 0; int *d_in_row_offsets, *d_in_column_indices, *d_out_row_offsets, *d_out_column_indices; CUDA_SAFE_CALL(hipMalloc((void **)&d_in_row_offsets, (m + 1) * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_in_column_indices, nnz * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_out_row_offsets, (m + 1) * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_out_column_indices, nnz * sizeof(int))); CUDA_SAFE_CALL(hipMemcpy(d_in_row_offsets, in_row_offsets, (m + 1) * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_in_column_indices, in_column_indices, nnz * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_out_row_offsets, out_row_offsets, (m + 1) * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_out_column_indices, out_column_indices, nnz * sizeof(int), hipMemcpyHostToDevice)); unsigned *d_colors, *d_locks; int *d_scc_root; unsigned *h_colors = (unsigned *)malloc(m * sizeof(unsigned)); CUDA_SAFE_CALL(hipMalloc((void **)&d_colors, m * sizeof(unsigned))); CUDA_SAFE_CALL(hipMalloc((void **)&d_locks, (PIVOT_HASH_CONST+1) * sizeof(unsigned))); CUDA_SAFE_CALL(hipMalloc((void **)&d_scc_root, m * sizeof(int))); thrust::fill(thrust::device, d_colors, d_colors + m, INIT_COLOR); thrust::sequence(thrust::device, d_scc_root, d_scc_root + m); unsigned char *h_status = (unsigned char*)malloc(m * sizeof(unsigned char)); unsigned char *d_status; 
CUDA_SAFE_CALL(hipMalloc((void **)&d_status, sizeof(unsigned char) * m)); CUDA_SAFE_CALL(hipMemset(d_status, 0, m * sizeof(unsigned char))); bool has_pivot; int source; printf("Start solving SCC detection..."); CUDA_SAFE_CALL(hipDeviceSynchronize()); t.Start(); first_trim(m, d_in_row_offsets, d_in_column_indices, d_out_row_offsets, d_out_column_indices, d_status); CUDA_SAFE_CALL(hipMemcpy(h_status, d_status, m * sizeof(bool), hipMemcpyDeviceToHost)); for (int i = 0; i < m; i++) { if(!is_removed(h_status[i])) { printf("vertex %d not eliminated, set as the first pivot\n", i); source = i; break; } } CUDA_SAFE_CALL(hipMemset(&d_status[source], 19, 1)); do { ++ iter; has_pivot = false; if(debug) printf("iteration=%d\n", iter); fwd_reach(m, d_out_row_offsets, d_out_column_indices, d_colors, d_status, d_scc_root); bwd_reach(m, d_in_row_offsets, d_in_column_indices, d_colors, d_status); iterative_trim(m, d_in_row_offsets, d_in_column_indices, d_out_row_offsets, d_out_column_indices, d_colors, d_status, d_scc_root); CUDA_SAFE_CALL(hipMemset(d_locks, 0, (PIVOT_HASH_CONST+1) * sizeof(unsigned))); has_pivot = update(m, d_colors, d_status, d_locks, d_scc_root); } while (has_pivot); CUDA_SAFE_CALL(hipDeviceSynchronize()); t.Stop(); printf("Done\n"); CUDA_SAFE_CALL(hipMemcpy(h_scc_root, d_scc_root, sizeof(unsigned) * m, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(h_status, d_status, sizeof(unsigned char) * m, hipMemcpyDeviceToHost)); print_statistics(m, h_scc_root, h_status); printf("\titerations = %d.\n", iter); printf("\truntime [%s] = %f ms.\n", SCC_VARIANT, t.Millisecs()); CUDA_SAFE_CALL(hipFree(d_in_row_offsets)); CUDA_SAFE_CALL(hipFree(d_in_column_indices)); CUDA_SAFE_CALL(hipFree(d_out_row_offsets)); CUDA_SAFE_CALL(hipFree(d_out_column_indices)); CUDA_SAFE_CALL(hipFree(d_colors)); CUDA_SAFE_CALL(hipFree(d_locks)); CUDA_SAFE_CALL(hipFree(d_status)); free(h_status); }
2649babb9cfda11e873c662350b6ca241ec4c71e.cu
// Copyright 2016, National University of Defense Technology // Authors: Xuhao Chen <cxh@illinois.edu> #define SCC_VARIANT "bitset" #include "scc.h" #include "bitset.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include <thrust/reduce.h> #include <thrust/execution_policy.h> #include "timer.h" #define debug 0 void SCCSolver(int m, int nnz, int *in_row_offsets, int *in_column_indices, int *out_row_offsets, int *out_column_indices, int *h_scc_root) { print_device_info(0); Timer t; int iter = 0; int *d_in_row_offsets, *d_in_column_indices, *d_out_row_offsets, *d_out_column_indices; CUDA_SAFE_CALL(cudaMalloc((void **)&d_in_row_offsets, (m + 1) * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_in_column_indices, nnz * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_out_row_offsets, (m + 1) * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_out_column_indices, nnz * sizeof(int))); CUDA_SAFE_CALL(cudaMemcpy(d_in_row_offsets, in_row_offsets, (m + 1) * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_in_column_indices, in_column_indices, nnz * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_out_row_offsets, out_row_offsets, (m + 1) * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_out_column_indices, out_column_indices, nnz * sizeof(int), cudaMemcpyHostToDevice)); unsigned *d_colors, *d_locks; int *d_scc_root; unsigned *h_colors = (unsigned *)malloc(m * sizeof(unsigned)); CUDA_SAFE_CALL(cudaMalloc((void **)&d_colors, m * sizeof(unsigned))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_locks, (PIVOT_HASH_CONST+1) * sizeof(unsigned))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_scc_root, m * sizeof(int))); thrust::fill(thrust::device, d_colors, d_colors + m, INIT_COLOR); thrust::sequence(thrust::device, d_scc_root, d_scc_root + m); unsigned char *h_status = (unsigned char*)malloc(m * sizeof(unsigned char)); unsigned char *d_status; CUDA_SAFE_CALL(cudaMalloc((void **)&d_status, sizeof(unsigned char) * 
m)); CUDA_SAFE_CALL(cudaMemset(d_status, 0, m * sizeof(unsigned char))); bool has_pivot; int source; printf("Start solving SCC detection..."); CUDA_SAFE_CALL(cudaDeviceSynchronize()); t.Start(); first_trim(m, d_in_row_offsets, d_in_column_indices, d_out_row_offsets, d_out_column_indices, d_status); CUDA_SAFE_CALL(cudaMemcpy(h_status, d_status, m * sizeof(bool), cudaMemcpyDeviceToHost)); for (int i = 0; i < m; i++) { if(!is_removed(h_status[i])) { printf("vertex %d not eliminated, set as the first pivot\n", i); source = i; break; } } CUDA_SAFE_CALL(cudaMemset(&d_status[source], 19, 1)); do { ++ iter; has_pivot = false; if(debug) printf("iteration=%d\n", iter); fwd_reach(m, d_out_row_offsets, d_out_column_indices, d_colors, d_status, d_scc_root); bwd_reach(m, d_in_row_offsets, d_in_column_indices, d_colors, d_status); iterative_trim(m, d_in_row_offsets, d_in_column_indices, d_out_row_offsets, d_out_column_indices, d_colors, d_status, d_scc_root); CUDA_SAFE_CALL(cudaMemset(d_locks, 0, (PIVOT_HASH_CONST+1) * sizeof(unsigned))); has_pivot = update(m, d_colors, d_status, d_locks, d_scc_root); } while (has_pivot); CUDA_SAFE_CALL(cudaDeviceSynchronize()); t.Stop(); printf("Done\n"); CUDA_SAFE_CALL(cudaMemcpy(h_scc_root, d_scc_root, sizeof(unsigned) * m, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(h_status, d_status, sizeof(unsigned char) * m, cudaMemcpyDeviceToHost)); print_statistics(m, h_scc_root, h_status); printf("\titerations = %d.\n", iter); printf("\truntime [%s] = %f ms.\n", SCC_VARIANT, t.Millisecs()); CUDA_SAFE_CALL(cudaFree(d_in_row_offsets)); CUDA_SAFE_CALL(cudaFree(d_in_column_indices)); CUDA_SAFE_CALL(cudaFree(d_out_row_offsets)); CUDA_SAFE_CALL(cudaFree(d_out_column_indices)); CUDA_SAFE_CALL(cudaFree(d_colors)); CUDA_SAFE_CALL(cudaFree(d_locks)); CUDA_SAFE_CALL(cudaFree(d_status)); free(h_status); }
0eaf81efb0f9badeee16d287fd471b293e2579fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #include <float.h> typedef struct { int x, y; } Point; typedef struct { float4 avg; double inverse_cov[3][3]; double log_det; } Class; __constant__ Class dev_class[32]; float4 Average(uchar4 *data, int w, int h, Point *class_points, int point_n) { float4 result = make_float4(0, 0, 0, 0); for (int i = 0; i < point_n; ++i) { Point p = class_points[i]; uchar4 pixel = data[p.y * w + p.x]; result.x += pixel.x; result.y += pixel.y; result.z += pixel.z; } result.x /= point_n; result.y /= point_n; result.z /= point_n; return result; } void CalculateCovariance(double cov[3][3], uchar4 *data, int w, int h, Point *class_points, int point_n, float4 avg) { for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { cov[i][j] = 0; } } for (int i = 0; i < point_n; ++i) { Point p = class_points[i]; uchar4 pixel = data[p.y * w + p.x]; double delta[3] = {pixel.x - avg.x, pixel.y - avg.y, pixel.z - avg.z}; for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { cov[i][j] += delta[i] * delta[j]; } } } for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { cov[i][j] /= point_n - 1; } } } double Determinant(double cov[3][3]) { double det = 0; for (int i = 0; i < 3; ++i) { det += cov[0][i] * cov[1][(i + 1) % 3] * cov[2][(i + 2) % 3]; det -= cov[0][(i + 2) % 3] * cov[1][(i + 1) % 3] * cov[2][i]; } return det; } void Inverse(double in[3][3], double out[3][3]) { double det = Determinant(in); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { out[i][j] = in[(j + 1) % 3][(i + 1) % 3] * in[(j + 2) % 3][(i + 2) % 3] - in[(j + 1) % 3][(i + 2) % 3] * in[(j + 2) % 3][(i + 1) % 3]; out[i][j] /= det; } } } __device__ double MaxLikehoodEstimation(uchar4 p, int class_idx) { Class c = dev_class[class_idx]; double delta[3] = {p.x - c.avg.x, p.y - c.avg.y, p.z - c.avg.z}; double temp[3] = {0,}; for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { temp[i] += 
delta[j] * c.inverse_cov[j][i]; } } double result = -c.log_det; for (int i = 0; i < 3; ++i) { result -= temp[i] * delta[i]; } return result; } __global__ void kernel(uchar4 *image, int w, int h, int class_count) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int offsetx = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; int i, j; int class_idx; int max_idx = 0; double value; double max_value; for (i = idx; i < w; i += offsetx) { for (j = idy; j < h; j += offsety) { uchar4 pixel = image[j * w + i]; max_value = INT_MIN; for (class_idx = 0; class_idx < class_count; ++class_idx) { value = MaxLikehoodEstimation(pixel, class_idx); if (value > max_value) { max_idx = class_idx; max_value = value; } } image[j * w + i] = make_uchar4(pixel.x, pixel.y, pixel.z, max_idx); } } } int main() { char input_file[256], output_file[256]; int class_count; scanf("%s", input_file); scanf("%s", output_file); scanf("%d", &class_count); Point *class_points[class_count]; int w, h; FILE *in = fopen(input_file, "rb"); fread(&w, sizeof(uchar4), 1 , in); fread(&h, sizeof(uchar4), 1 , in); uchar4 *data = (uchar4*) malloc(sizeof(uchar4) * h * w); fread(data, sizeof(uchar4), h * w, in); fclose(in); int point_n[class_count]; for (int i = 0; i < class_count; ++i) { scanf("%d", &point_n[i]); class_points[i] = (Point *) malloc(sizeof(Point) * point_n[i]); for (int j = 0; j < point_n[i]; ++j) { scanf("%d%d", &class_points[i][j].x, &class_points[i][j].y); } } Class class_arr[class_count]; double cov[3][3]; for (int i = 0; i < class_count; ++i) { class_arr[i].avg = Average(data, w, h, class_points[i], point_n[i]); CalculateCovariance(cov, data, w, h, class_points[i], point_n[i], class_arr[i].avg); Inverse(cov, class_arr[i].inverse_cov); class_arr[i].log_det = log(Determinant(cov)); } uchar4 *dev_data; hipMalloc(&dev_data, sizeof(uchar4) * h * w); hipMemcpy(dev_data, data, sizeof(uchar4) * h * w, hipMemcpyHostToDevice); 
hipMemcpyToSymbol(dev_class, class_arr, sizeof(Class) * class_count); hipLaunchKernelGGL(( kernel), dim3(dim3(32,32)), dim3(dim3(32, 32)), 0, 0, dev_data, w, h, class_count); hipMemcpy(data, dev_data, sizeof(uchar4) * h * w, hipMemcpyDeviceToHost); FILE *out = fopen(output_file, "wb"); fwrite(&w, sizeof(uchar4), 1, out); fwrite(&h, sizeof(uchar4), 1, out); fwrite(data, sizeof(uchar4), h * w, out); fclose(out); hipFree(dev_data); for (int i = 0; i < class_count; ++i) { free(class_points[i]); } free(data); return 0; }
0eaf81efb0f9badeee16d287fd471b293e2579fd.cu
#include <stdio.h> #include <math.h> #include <float.h> typedef struct { int x, y; } Point; typedef struct { float4 avg; double inverse_cov[3][3]; double log_det; } Class; __constant__ Class dev_class[32]; float4 Average(uchar4 *data, int w, int h, Point *class_points, int point_n) { float4 result = make_float4(0, 0, 0, 0); for (int i = 0; i < point_n; ++i) { Point p = class_points[i]; uchar4 pixel = data[p.y * w + p.x]; result.x += pixel.x; result.y += pixel.y; result.z += pixel.z; } result.x /= point_n; result.y /= point_n; result.z /= point_n; return result; } void CalculateCovariance(double cov[3][3], uchar4 *data, int w, int h, Point *class_points, int point_n, float4 avg) { for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { cov[i][j] = 0; } } for (int i = 0; i < point_n; ++i) { Point p = class_points[i]; uchar4 pixel = data[p.y * w + p.x]; double delta[3] = {pixel.x - avg.x, pixel.y - avg.y, pixel.z - avg.z}; for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { cov[i][j] += delta[i] * delta[j]; } } } for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { cov[i][j] /= point_n - 1; } } } double Determinant(double cov[3][3]) { double det = 0; for (int i = 0; i < 3; ++i) { det += cov[0][i] * cov[1][(i + 1) % 3] * cov[2][(i + 2) % 3]; det -= cov[0][(i + 2) % 3] * cov[1][(i + 1) % 3] * cov[2][i]; } return det; } void Inverse(double in[3][3], double out[3][3]) { double det = Determinant(in); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { out[i][j] = in[(j + 1) % 3][(i + 1) % 3] * in[(j + 2) % 3][(i + 2) % 3] - in[(j + 1) % 3][(i + 2) % 3] * in[(j + 2) % 3][(i + 1) % 3]; out[i][j] /= det; } } } __device__ double MaxLikehoodEstimation(uchar4 p, int class_idx) { Class c = dev_class[class_idx]; double delta[3] = {p.x - c.avg.x, p.y - c.avg.y, p.z - c.avg.z}; double temp[3] = {0,}; for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { temp[i] += delta[j] * c.inverse_cov[j][i]; } } double result = -c.log_det; for (int i = 0; i < 3; 
++i) { result -= temp[i] * delta[i]; } return result; } __global__ void kernel(uchar4 *image, int w, int h, int class_count) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int offsetx = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; int i, j; int class_idx; int max_idx = 0; double value; double max_value; for (i = idx; i < w; i += offsetx) { for (j = idy; j < h; j += offsety) { uchar4 pixel = image[j * w + i]; max_value = INT_MIN; for (class_idx = 0; class_idx < class_count; ++class_idx) { value = MaxLikehoodEstimation(pixel, class_idx); if (value > max_value) { max_idx = class_idx; max_value = value; } } image[j * w + i] = make_uchar4(pixel.x, pixel.y, pixel.z, max_idx); } } } int main() { char input_file[256], output_file[256]; int class_count; scanf("%s", input_file); scanf("%s", output_file); scanf("%d", &class_count); Point *class_points[class_count]; int w, h; FILE *in = fopen(input_file, "rb"); fread(&w, sizeof(uchar4), 1 , in); fread(&h, sizeof(uchar4), 1 , in); uchar4 *data = (uchar4*) malloc(sizeof(uchar4) * h * w); fread(data, sizeof(uchar4), h * w, in); fclose(in); int point_n[class_count]; for (int i = 0; i < class_count; ++i) { scanf("%d", &point_n[i]); class_points[i] = (Point *) malloc(sizeof(Point) * point_n[i]); for (int j = 0; j < point_n[i]; ++j) { scanf("%d%d", &class_points[i][j].x, &class_points[i][j].y); } } Class class_arr[class_count]; double cov[3][3]; for (int i = 0; i < class_count; ++i) { class_arr[i].avg = Average(data, w, h, class_points[i], point_n[i]); CalculateCovariance(cov, data, w, h, class_points[i], point_n[i], class_arr[i].avg); Inverse(cov, class_arr[i].inverse_cov); class_arr[i].log_det = log(Determinant(cov)); } uchar4 *dev_data; cudaMalloc(&dev_data, sizeof(uchar4) * h * w); cudaMemcpy(dev_data, data, sizeof(uchar4) * h * w, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(dev_class, class_arr, sizeof(Class) * class_count); kernel<<<dim3(32,32), dim3(32, 
32)>>>(dev_data, w, h, class_count); cudaMemcpy(data, dev_data, sizeof(uchar4) * h * w, cudaMemcpyDeviceToHost); FILE *out = fopen(output_file, "wb"); fwrite(&w, sizeof(uchar4), 1, out); fwrite(&h, sizeof(uchar4), 1, out); fwrite(data, sizeof(uchar4), h * w, out); fclose(out); cudaFree(dev_data); for (int i = 0; i < class_count; ++i) { free(class_points[i]); } free(data); return 0; }
30881c6681f9176509382f06e2cad7ee100fba0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudakernel/unary/unary.h" #include <hip/hip_fp16.h> enum UnaryOpType { Unary_Unknown = 0, Unary_Abs, Unary_Relu, Unary_Sigmoid, Unary_Sqrt, Unary_TanH, Unary_Floor, Unary_Ceil, Unary_OpNum, Unary_ForceWord = INT_MAX, }; #if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9 template <UnaryOpType OpT, typename DataT> __device__ __inline__ DataT ppl_scalar_unary(const DataT& in_val); template <> __device__ __inline__ float ppl_scalar_unary<Unary_Abs, float>(const float& in_val) { return fabsf(in_val); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Abs, half>(const half& in_val) { return __float2half(fabsf(__half2float(in_val))); } template <> __device__ __inline__ float ppl_scalar_unary<Unary_Relu, float>(const float& in_val) { float res; res = (in_val > 0) ? in_val : 0; return res; } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Relu, half>(const half& in_val) { half res; res = __hgt(in_val, 0) ? 
in_val : half(0); return res; } template <> __device__ __inline__ float ppl_scalar_unary<Unary_Sigmoid, float>(const float& in_val) { return 1.f / (1.f + expf(-in_val)); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Sigmoid, half>(const half& in_val) { float in_valf = __half2float(in_val); float resf = 1.f / (1.f + expf(-in_valf)); return __float2half(resf); } template <> __device__ __inline__ float ppl_scalar_unary<Unary_Sqrt, float>(const float& in_val) { return sqrt(in_val); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Sqrt, half>(const half& in_val) { return __float2half(sqrt(__half2float(in_val))); } template <> __device__ __inline__ float ppl_scalar_unary<Unary_TanH, float>(const float& in_val) { return tanh(in_val); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_TanH, half>(const half& in_val) { return __float2half(tanh(__half2float(in_val))); } template <> __device__ __inline__ float ppl_scalar_unary<Unary_Floor, float>(const float& in_val) { return floor(in_val); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Floor, half>(const half& in_val) { return hfloor(in_val); } template <> __device__ __inline__ float ppl_scalar_unary<Unary_Ceil, float>(const float& in_val) { return ceil(in_val); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Ceil, half>(const half& in_val) { return hceil(in_val); } #endif template <UnaryOpType OpT, typename DataT> __global__ void ppl_cukernel_unary_any( const uint64_t num_elems, const DataT* input, DataT* output) { #if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9 uint64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_elems) return; DataT in_val = input[index]; output[index] = ppl_scalar_unary<OpT, DataT>(in_val); #endif } #define UNARY_INSTANT(TYPE) \ ppl::common::RetCode PPLCUDAUnary##TYPE##ForwardImp( \ hipStream_t stream, \ const ppl::nn::TensorShape* input_shape, \ const void* input, \ const ppl::nn::TensorShape* 
output_shape, \ void* output) \ { \ uint64_t num_elems = output_shape->GetElementsIncludingPadding(); \ int block_size = 256; \ uint64_t grid_size = (num_elems + block_size - 1) / block_size; \ if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) { \ hipLaunchKernelGGL(( ppl_cukernel_unary_any<Unary_##TYPE, float>), dim3(grid_size), dim3(block_size), 0, stream, num_elems, (const float*)input, (float*)output); \ } else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) { \ hipLaunchKernelGGL(( ppl_cukernel_unary_any<Unary_##TYPE, half>), dim3(grid_size), dim3(block_size), 0, stream, num_elems, (const half*)input, (half*)output); \ } else { \ return ppl::common::RC_UNSUPPORTED; \ } \ return ppl::common::RC_SUCCESS; \ } UNARY_INSTANT(Abs); UNARY_INSTANT(Relu); UNARY_INSTANT(TanH); UNARY_INSTANT(Sigmoid); UNARY_INSTANT(Sqrt); UNARY_INSTANT(Floor); UNARY_INSTANT(Ceil); #undef UNARY_INSTANT
30881c6681f9176509382f06e2cad7ee100fba0f.cu
#include "cudakernel/unary/unary.h" #include <cuda_fp16.h> enum UnaryOpType { Unary_Unknown = 0, Unary_Abs, Unary_Relu, Unary_Sigmoid, Unary_Sqrt, Unary_TanH, Unary_Floor, Unary_Ceil, Unary_OpNum, Unary_ForceWord = INT_MAX, }; #if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9 template <UnaryOpType OpT, typename DataT> __device__ __inline__ DataT ppl_scalar_unary(const DataT& in_val); template <> __device__ __inline__ float ppl_scalar_unary<Unary_Abs, float>(const float& in_val) { return fabsf(in_val); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Abs, half>(const half& in_val) { return __float2half(fabsf(__half2float(in_val))); } template <> __device__ __inline__ float ppl_scalar_unary<Unary_Relu, float>(const float& in_val) { float res; res = (in_val > 0) ? in_val : 0; return res; } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Relu, half>(const half& in_val) { half res; res = __hgt(in_val, 0) ? in_val : half(0); return res; } template <> __device__ __inline__ float ppl_scalar_unary<Unary_Sigmoid, float>(const float& in_val) { return 1.f / (1.f + expf(-in_val)); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Sigmoid, half>(const half& in_val) { float in_valf = __half2float(in_val); float resf = 1.f / (1.f + expf(-in_valf)); return __float2half(resf); } template <> __device__ __inline__ float ppl_scalar_unary<Unary_Sqrt, float>(const float& in_val) { return sqrt(in_val); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Sqrt, half>(const half& in_val) { return __float2half(sqrt(__half2float(in_val))); } template <> __device__ __inline__ float ppl_scalar_unary<Unary_TanH, float>(const float& in_val) { return tanh(in_val); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_TanH, half>(const half& in_val) { return __float2half(tanh(__half2float(in_val))); } template <> __device__ __inline__ float ppl_scalar_unary<Unary_Floor, float>(const float& in_val) { return floor(in_val); } template 
<> __device__ __inline__ half ppl_scalar_unary<Unary_Floor, half>(const half& in_val) { return hfloor(in_val); } template <> __device__ __inline__ float ppl_scalar_unary<Unary_Ceil, float>(const float& in_val) { return ceil(in_val); } template <> __device__ __inline__ half ppl_scalar_unary<Unary_Ceil, half>(const half& in_val) { return hceil(in_val); } #endif template <UnaryOpType OpT, typename DataT> __global__ void ppl_cukernel_unary_any( const uint64_t num_elems, const DataT* input, DataT* output) { #if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9 uint64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_elems) return; DataT in_val = input[index]; output[index] = ppl_scalar_unary<OpT, DataT>(in_val); #endif } #define UNARY_INSTANT(TYPE) \ ppl::common::RetCode PPLCUDAUnary##TYPE##ForwardImp( \ cudaStream_t stream, \ const ppl::nn::TensorShape* input_shape, \ const void* input, \ const ppl::nn::TensorShape* output_shape, \ void* output) \ { \ uint64_t num_elems = output_shape->GetElementsIncludingPadding(); \ int block_size = 256; \ uint64_t grid_size = (num_elems + block_size - 1) / block_size; \ if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) { \ ppl_cukernel_unary_any<Unary_##TYPE, float><<<grid_size, block_size, 0, stream>>>(num_elems, (const float*)input, (float*)output); \ } else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) { \ ppl_cukernel_unary_any<Unary_##TYPE, half><<<grid_size, block_size, 0, stream>>>(num_elems, (const half*)input, (half*)output); \ } else { \ return ppl::common::RC_UNSUPPORTED; \ } \ return ppl::common::RC_SUCCESS; \ } UNARY_INSTANT(Abs); UNARY_INSTANT(Relu); UNARY_INSTANT(TanH); UNARY_INSTANT(Sigmoid); UNARY_INSTANT(Sqrt); UNARY_INSTANT(Floor); UNARY_INSTANT(Ceil); #undef UNARY_INSTANT
56f92357bb660544e487552fbbb1e12ae9b45afd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and proprietary * rights in and to this software, related documentation and any modifications thereto. * Any use, reproduction, disclosure or distribution of this software and related * documentation without an express license agreement from NVIDIA Corporation is strictly * prohibited. * * TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS* * AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED, * INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY * SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT * LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF * BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR * INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF * SUCH DAMAGES */ #include <stdlib.h> #include <stdio.h> #include "../../include/optix_world.h" using namespace optix; // Find the probe direction closest to the query direction __device__ uint GetMatchingDir(const float3 *probe_dirs, uint probe_dirs_size, const float3 &in) { float maxDot = -1e35f; uint bestPr = 0u; for(uint pr=0; pr<probe_dirs_size; pr++) { float d = dot(probe_dirs[pr], in); if(d>maxDot) { maxDot = d; bestPr = pr; } } return bestPr; } extern "C" __global__ void LOS_reduction( const float4 *moving_objs, float4 *moving_objs_other_frame, uint moving_obj_count, const float3 *probe_dirs, uint probe_dirs_size, const float *output_LOS_buffer, const float *output_probe_buffer, float collision_distance, float miss_range, float target_speed ) { uint v = blockIdx.x; float3 V 
= optix::make_float3( moving_objs[v] ); // Find a target to run from or toward float extremeTDstSqr = 1e35f; float3 TargetDir; uint width = moving_obj_count; for(size_t t=0; t<width; t++) { if(v==t) continue; uint coords = (v<t) ? (v*moving_obj_count+t) : (t*moving_obj_count+v); float cdst = output_LOS_buffer[coords]; // Keep in the upper diagonal if(cdst != miss_range) continue; // Can't see this target float3 T = optix::make_float3( moving_objs[t] ); float3 dir = T - V; // Direction to target float tdstSqr = dot(dir,dir); // Distance to target if(tdstSqr < extremeTDstSqr) { extremeTDstSqr = tdstSqr; if(v&1) { // Odd: Green: if(t&1) TargetDir = -dir; // Go away from closest guy in view if it's green else TargetDir = dir; // Go toward closest guy in view if it's red } else { // Even: Red: if(t&1) TargetDir = -dir; // Go away from closest guy in view if it's green else TargetDir = -dir; // Go away from closest guy in view if it's red } } } // Can't acquire a target so try to keep moving forward if(extremeTDstSqr == 1e35f) { float3 V0 = optix::make_float3( moving_objs_other_frame[v] ); // Get my old position TargetDir = V - V0; } // We must be careful to only go in EXACTLY a direction we've probed, // not in a TargetDir. This includes not setting the z component to 0. // See if there is a wall in the target direction uint pro = GetMatchingDir(probe_dirs, probe_dirs_size, TargetDir); float Range = output_probe_buffer[v*probe_dirs_size+pro]; if(Range > collision_distance) { // Plenty of room ahead; go for it TargetDir = probe_dirs[pro]; } else { // Find the closest direction to the target that has no collision // This can press us into but not through walls. // We can step diagonally toward the wall. But why don't we then go through it? // Because there are no unoccluded probes in the wall's half space. 
float maxDot = -1e35f; size_t bestPr = 0u; for(size_t pr=0; pr<probe_dirs_size; pr++) { float d = dot(probe_dirs[pr], TargetDir); float Range = output_probe_buffer[v*probe_dirs_size+pr]; if(Range == miss_range && d > maxDot) { maxDot = d; bestPr = pr; } } TargetDir = probe_dirs[bestPr]; } TargetDir = normalize(TargetDir); float3 NewVel = TargetDir * ((v&1) ? target_speed : target_speed*1.2f); float3 VOut = V + NewVel; moving_objs_other_frame[v] = optix::make_float4(VOut,1.0f); } extern "C" __host__ void LOS_reduction_CUDA( const float4 *moving_objs, float4 *moving_objs_other_frame, uint moving_obj_count, const float3 *probe_dirs, uint probe_dirs_size, const float *output_LOS_buffer, const float *output_probe_buffer, float collision_distance, float miss_range, float target_speed ) { hipLaunchKernelGGL(( LOS_reduction) , dim3(moving_obj_count), dim3(1) , 0, 0, moving_objs, moving_objs_other_frame, moving_obj_count, probe_dirs, probe_dirs_size, output_LOS_buffer, output_probe_buffer, collision_distance, miss_range, target_speed ); }
56f92357bb660544e487552fbbb1e12ae9b45afd.cu
/* * Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and proprietary * rights in and to this software, related documentation and any modifications thereto. * Any use, reproduction, disclosure or distribution of this software and related * documentation without an express license agreement from NVIDIA Corporation is strictly * prohibited. * * TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS* * AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED, * INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY * SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT * LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF * BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR * INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF * SUCH DAMAGES */ #include <stdlib.h> #include <stdio.h> #include "../../include/optix_world.h" using namespace optix; // Find the probe direction closest to the query direction __device__ uint GetMatchingDir(const float3 *probe_dirs, uint probe_dirs_size, const float3 &in) { float maxDot = -1e35f; uint bestPr = 0u; for(uint pr=0; pr<probe_dirs_size; pr++) { float d = dot(probe_dirs[pr], in); if(d>maxDot) { maxDot = d; bestPr = pr; } } return bestPr; } extern "C" __global__ void LOS_reduction( const float4 *moving_objs, float4 *moving_objs_other_frame, uint moving_obj_count, const float3 *probe_dirs, uint probe_dirs_size, const float *output_LOS_buffer, const float *output_probe_buffer, float collision_distance, float miss_range, float target_speed ) { uint v = blockIdx.x; float3 V = optix::make_float3( moving_objs[v] ); // Find a target to run from or toward float 
extremeTDstSqr = 1e35f; float3 TargetDir; uint width = moving_obj_count; for(size_t t=0; t<width; t++) { if(v==t) continue; uint coords = (v<t) ? (v*moving_obj_count+t) : (t*moving_obj_count+v); float cdst = output_LOS_buffer[coords]; // Keep in the upper diagonal if(cdst != miss_range) continue; // Can't see this target float3 T = optix::make_float3( moving_objs[t] ); float3 dir = T - V; // Direction to target float tdstSqr = dot(dir,dir); // Distance to target if(tdstSqr < extremeTDstSqr) { extremeTDstSqr = tdstSqr; if(v&1) { // Odd: Green: if(t&1) TargetDir = -dir; // Go away from closest guy in view if it's green else TargetDir = dir; // Go toward closest guy in view if it's red } else { // Even: Red: if(t&1) TargetDir = -dir; // Go away from closest guy in view if it's green else TargetDir = -dir; // Go away from closest guy in view if it's red } } } // Can't acquire a target so try to keep moving forward if(extremeTDstSqr == 1e35f) { float3 V0 = optix::make_float3( moving_objs_other_frame[v] ); // Get my old position TargetDir = V - V0; } // We must be careful to only go in EXACTLY a direction we've probed, // not in a TargetDir. This includes not setting the z component to 0. // See if there is a wall in the target direction uint pro = GetMatchingDir(probe_dirs, probe_dirs_size, TargetDir); float Range = output_probe_buffer[v*probe_dirs_size+pro]; if(Range > collision_distance) { // Plenty of room ahead; go for it TargetDir = probe_dirs[pro]; } else { // Find the closest direction to the target that has no collision // This can press us into but not through walls. // We can step diagonally toward the wall. But why don't we then go through it? // Because there are no unoccluded probes in the wall's half space. 
float maxDot = -1e35f; size_t bestPr = 0u; for(size_t pr=0; pr<probe_dirs_size; pr++) { float d = dot(probe_dirs[pr], TargetDir); float Range = output_probe_buffer[v*probe_dirs_size+pr]; if(Range == miss_range && d > maxDot) { maxDot = d; bestPr = pr; } } TargetDir = probe_dirs[bestPr]; } TargetDir = normalize(TargetDir); float3 NewVel = TargetDir * ((v&1) ? target_speed : target_speed*1.2f); float3 VOut = V + NewVel; moving_objs_other_frame[v] = optix::make_float4(VOut,1.0f); } extern "C" __host__ void LOS_reduction_CUDA( const float4 *moving_objs, float4 *moving_objs_other_frame, uint moving_obj_count, const float3 *probe_dirs, uint probe_dirs_size, const float *output_LOS_buffer, const float *output_probe_buffer, float collision_distance, float miss_range, float target_speed ) { LOS_reduction <<< moving_obj_count, 1 >>> ( moving_objs, moving_objs_other_frame, moving_obj_count, probe_dirs, probe_dirs_size, output_LOS_buffer, output_probe_buffer, collision_distance, miss_range, target_speed ); }
6d51665ded8caf91724aa0af70d29e3da251e5b8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "util.h" #include "kernel.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define MASS .5f #define SHEAR_LEN 1.414213562f #define BEND_LEN 2.0f #define DAMPING 1.0f #define G 2.8f #define K 200.0f float* d_vertexData = 0; float* d_normal = 0; glm::vec3* d_vel = 0; glm::vec3* d_force = 0; glm::vec3* d_pos = 0; glm::vec3* d_pos2 = 0; __global__ void copyVertexDatakernel( glm::vec3* pos, glm::vec3* pos2, float4* vertexData, int massXNum, int massYNum ) { int2 idx; int offset; idx.x = blockIdx.x * blockDim.x + threadIdx.x; idx.y = blockIdx.y * blockDim.y + threadIdx.y; offset = idx.y * massXNum + idx.x; if( idx.x < massXNum && idx.y < massYNum ) { pos[offset].x = vertexData[ offset ].x; pos[offset].y = vertexData[ offset ].y; pos[offset].z = vertexData[ offset ].z; pos2[offset] = pos[offset]; } } int initCUDABuffer( cudaGraphicsResource* &vboResource, int massXNum, int massYNum ) { cudaErrorCheck( hipMalloc((void**)&d_vel, massXNum * massYNum * sizeof( glm::vec3 ) ) ); cudaErrorCheck( hipMalloc((void**)&d_force, massXNum * massYNum * sizeof( glm::vec3 ) ) ); cudaErrorCheck( hipMalloc((void**)&d_pos, massXNum * massYNum * sizeof( glm::vec3 ) ) ); cudaErrorCheck( hipMalloc((void**)&d_pos2, massXNum * massYNum * sizeof( glm::vec3 ) ) ); cudaErrorCheck( hipMemset( (void*)d_vel, 0, massXNum * massYNum * sizeof( glm::vec3 ) ) ); cudaErrorCheck( hipMemset( (void*)d_force, 0, massXNum * massYNum * sizeof( glm::vec3 ) ) ); //populate the pos buffer with data from VBO size_t vboSize; dim3 blockSize = dim3(16*16); dim3 gridSize = dim3( (massXNum + blockSize.x-1)/blockSize.x, (massYNum + blockSize.y-1)/blockSize.y ); cudaErrorCheck( hipGraphicsMapResources( 1, &vboResource, 0 ) ); cudaErrorCheck( hipGraphicsResourceGetMappedPointer((void**) &d_vertexData, &vboSize, vboResource ) ); hipLaunchKernelGGL(( copyVertexDatakernel), dim3(gridSize),dim3(blockSize), 0, 0, d_pos, 
d_pos2, (float4*)d_vertexData, massXNum, massYNum ); cudaErrorCheck( hipGraphicsUnmapResources( 1, &vboResource, 0 ) ); return 0; } int cleanupCUDA() { if( d_vel ) hipFree( d_vel ); d_vel = 0; if( d_force ) hipFree( d_force ); d_force = 0; if( d_pos ) hipFree( d_pos ); d_pos = 0; if( d_pos2 ) hipFree( d_pos2 ); d_pos2 = 0; return 0; } __global__ void updateVelKernel( float4* vertexData, glm::vec3* pos, glm::vec3* force, glm::vec3* vel, float2 restlen, int massXNum, int massYNum ) { int2 idx; int2 adj; int offset; glm::vec3 vec; idx.x = blockIdx.x * blockDim.x + threadIdx.x; idx.y = blockIdx.y * blockDim.y + threadIdx.y; offset = idx.y * massXNum + idx.x; glm::vec3 internalforce( 0.0f,0.0f,0.0f ); if( idx.x < massXNum && idx.y < massYNum ) { //internal force - stretch adj.x = idx.x-1; if( (adj.x) >= 0 ) { vec = pos[ offset - 1] - pos[ offset ]; internalforce += K * ( vec - restlen.x * glm::normalize( vec ) ) ; } adj.x = idx.x+1; if( ( adj.x ) < massXNum ) { vec = pos[ idx.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - restlen.x * glm::normalize( vec ) ); } adj.y = idx.y - 1; if( (adj.y) >= 0 ) { vec = pos[ adj.y * massXNum + idx.x] - pos[ offset ]; internalforce += K * ( vec - restlen.x * glm::normalize( vec ) ); } adj.y = idx.y+1; if( ( adj.y) < massYNum ) { vec = pos[ adj.y * massXNum + idx.x] - pos[ offset ]; internalforce += K * ( vec - restlen.x * glm::normalize( vec ) ); } //internal force - shear if( (adj.x = idx.x -1 ) >= 0 && (adj.y=idx.y-1) >=0 ) { vec = pos[ adj.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - SHEAR_LEN*restlen.x * glm::normalize( vec ) ); } if( (adj.x = idx.x +1 ) < massXNum && (adj.y=idx.y+1) < massYNum ) { vec = pos[ adj.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - SHEAR_LEN*restlen.x * glm::normalize( vec ) ); } if( (adj.x = idx.x +1 ) < massXNum && (adj.y=idx.y-1) >= 0) { vec = pos[ adj.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - 
SHEAR_LEN*restlen.x * glm::normalize( vec ) ); } if( (adj.x = idx.x -1 ) >= 0 && (adj.y=idx.y+1) < massYNum ) { vec = pos[ adj.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - SHEAR_LEN*restlen.x * glm::normalize( vec ) ); } //if( idx.x == 0 || idx.x == massXNum-1 || idx.y == 0 || idx.y == massYNum-1 ) //{ //internal force - bend if( (adj.x = idx.x-2) >= 0 ) { vec = pos[ idx.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - BEND_LEN*restlen.x * glm::normalize( vec ) ); } if( ( adj.x = idx.x+2 ) < massXNum ) { vec = pos[ idx.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - BEND_LEN*restlen.x * glm::normalize( vec ) ); } if( (adj.y = idx.y-2) >= 0 ) { vec = pos[ adj.y * massXNum + idx.x] - pos[ offset ]; internalforce += K * ( vec - BEND_LEN*restlen.x * glm::normalize( vec ) ); } if( ( adj.y = idx.y+2 ) < massYNum ) { vec = pos[ adj.y * massXNum + idx.x] - pos[ offset ]; internalforce += K * ( vec - BEND_LEN*restlen.x * glm::normalize( vec ) ); } // } force[offset] = internalforce; } } void updateVelWrapper( cudaGraphicsResource* &vboResource, float2 restlen, int massXNum, int massYNum ) { size_t vboSize; dim3 blockSize = dim3(16*16); dim3 gridSize = dim3( (massXNum + blockSize.x-1)/blockSize.x, (massYNum + blockSize.y-1)/blockSize.y ); cudaErrorCheck( hipGraphicsMapResources( 1, &vboResource, 0 ) ); cudaErrorCheck( hipGraphicsResourceGetMappedPointer((void**) &d_vertexData, &vboSize, vboResource ) ); hipLaunchKernelGGL(( updateVelKernel), dim3(gridSize),dim3(blockSize), 0, 0, (float4*)d_vertexData, d_pos, d_force, d_vel, restlen, massXNum, massYNum ); cudaErrorCheck( hipGraphicsUnmapResources( 1, &vboResource, 0 ) ); } __global__ void updatePosKernel( float4* vertexData, glm::vec3* pos, glm::vec3* pos2, glm::vec3* accel, float3* normals, float dt, float elapse, float windFactor, float2 restlen, int massXNum, int massYNum, glm::vec3 sphere, float radius) { int2 idx; int offset; float diff; glm::vec3 newPos; 
glm::vec3 totalForce; glm::vec3 windForce; idx.x = blockIdx.x * blockDim.x + threadIdx.x; idx.y = blockIdx.y * blockDim.y + threadIdx.y; offset = idx.y * massXNum + idx.x; glm::vec3 N; if( idx.x < massXNum && idx.y < massYNum ) { if( idx.y == 0 ) return; //add external forces //Gravity totalForce = accel[offset] + MASS * glm::vec3( 0, -G, 0 ); //Damping force totalForce -= DAMPING*(pos[offset] - pos2[offset])/dt; //wind //totalForce += glm::vec3( sinf(idx.x*idx.y*elapse),cosf(0*elapse), ); N.x = normals[offset].x; N.y = normals[offset].y; N.z = normals[offset].z; windForce = N * glm::dot( N, glm::vec3( 0.0f , 0.0f, sinf(cosf(5.0f*idx.x*idx.y*0*elapse) ) ) ) * windFactor ; totalForce += windForce; //Verlet Integration newPos = 2.0f*pos[offset] - pos2[offset] + (dt*dt*totalForce/MASS); //apply stretch constraints //if( idx.y > 1 ) //{ // diff = glm::distance( newPos, pos[ (idx.y-1)*(massXNum)+idx.x] ) - 1.1f * restlen.x; // if( glm::distance( newPos, pos[(idx.y-1)*(massXNum)+idx.x] ) > 1.1f * restlen.x ) // newPos = pos[(idx.y-1)*massXNum+idx.x] + glm::normalize( newPos - pos[(idx.y-1)*massXNum+idx.x] ) * 1.1f * restlen.x; //} //if( idx.x > 1 ) //{ // if( glm::distance( newPos, pos[idx.y*massXNum+idx.x-1] ) > 1.1f * restlen.x ) // newPos = pos[idx.y*massXNum+idx.x-1] + glm::normalize( newPos - pos[idx.y*massXNum+idx.x-1] ) * 1.1f * restlen.x; //} //if( idx.y < massYNum - 1 ) //{ // if( glm::distance( newPos, pos[(idx.y+1)*(massXNum) + idx.x] ) > 1.1f * restlen.x ) // newPos = pos[(idx.y+1)*(massXNum) + idx.x] + glm::normalize( newPos - pos[(idx.y+1)*(massXNum) + idx.x] ) * 1.1f * restlen.x; //} //if( idx.x < massXNum - 1 ) //{ // if( glm::distance( newPos, pos[idx.y*massXNum+idx.x+1] ) > 1.1f * restlen.x ) // newPos = pos[idx.y*massXNum+idx.x+1] + glm::normalize( newPos - pos[idx.y*massXNum+idx.x+1] ) * 1.1f * restlen.x; //} if( glm::distance( newPos, sphere ) < radius+0.05f ) newPos = sphere + glm::normalize( newPos-sphere) * (radius+0.05f); pos2[offset] = 
pos[offset]; pos[offset] = newPos; } syncthreads(); if(idx.x < massXNum && idx.y < massYNum) { vertexData[ offset ].x = newPos.x; vertexData[ offset ].y = newPos.y; vertexData[ offset ].z = newPos.z; } } __global__ void updateInterPosKernel( float4* vertexData, glm::vec3* pos, int massXNum, int massYNum ) { int2 idx; int offset; idx.x = blockIdx.x * blockDim.x + threadIdx.x; idx.y = blockIdx.y * blockDim.y + threadIdx.y; offset = idx.y * massXNum + idx.x; if( idx.x < massXNum && idx.y < massYNum ) { pos[offset].x = vertexData[offset].x; pos[offset].y = vertexData[offset].y; pos[offset].z = vertexData[offset].z; } } void updatePosWrapper( cudaGraphicsResource* &vboResource, cudaGraphicsResource* normalVboSrc, float dt, float elapse, float windFactor, float2 restlen, int massXNum, int massYNum, glm::vec3 &sphere, float radius ) { size_t vboSize; dim3 blockSize = dim3(16*16); dim3 gridSize = dim3( (massXNum + blockSize.x-1)/blockSize.x, (massYNum + blockSize.y-1)/blockSize.y ); cudaErrorCheck( hipGraphicsMapResources( 1, &vboResource, 0 ) ); cudaErrorCheck( hipGraphicsResourceGetMappedPointer((void**) &d_vertexData, &vboSize, vboResource ) ); cudaErrorCheck( hipGraphicsMapResources( 1, &normalVboSrc, 0 ) ); cudaErrorCheck( hipGraphicsResourceGetMappedPointer((void**) &d_normal, &vboSize, normalVboSrc ) ); hipLaunchKernelGGL(( updatePosKernel), dim3(gridSize),dim3(blockSize), 0, 0, (float4*)d_vertexData,d_pos, d_pos2, d_force, (float3*)d_normal, dt, elapse, windFactor, restlen, massXNum, massYNum, sphere, radius ); hipLaunchKernelGGL(( updateInterPosKernel), dim3(gridSize),dim3(blockSize), 0, 0, (float4*)d_vertexData, d_pos, massXNum, massYNum ); cudaErrorCheck( hipGraphicsUnmapResources( 1, &vboResource, 0 ) ); cudaErrorCheck( hipGraphicsUnmapResources( 1, &normalVboSrc, 0 ) ); } __global__ void updateNormalKernel( float3* normals, glm::vec3* pos, int massXNum, int massYNum ) { int2 idx; int offset; glm::vec3 normal(0.0f, 0.0f, 1.0f ); idx.x = blockIdx.x * blockDim.x 
+ threadIdx.x; idx.y = blockIdx.y * blockDim.y + threadIdx.y; offset = idx.y * massXNum + idx.x; if( idx.x < massXNum-1 && idx.y < massYNum-1 ) { normal = glm::cross( pos[offset+ massXNum] - pos[offset], pos[offset+1] - pos[offset] ); normal = glm::normalize( normal ); } else if( idx.x == massXNum - 1 && idx.y < massYNum-1 ) { normal = glm::cross( pos[offset-1]-pos[offset] , pos[offset+ massXNum ] - pos[offset] ); normal = glm::normalize( normal ); } else if( idx.y == massYNum - 1 && idx.x< massXNum-1 ) { normal = glm::cross( pos[offset+1] - pos[offset], pos[offset- massXNum ] - pos[offset] ); normal = glm::normalize( normal ); } else if( idx.x == massXNum-1 && idx.y == massYNum-1 ) { normal = glm::cross( pos[offset-massXNum]-pos[offset], pos[offset- 1 ]- pos[offset] ); normal = glm::normalize( normal ); } if( idx.x < massXNum && idx.y < massYNum ) { normals[offset].x = normal.x; normals[offset].y = normal.y; normals[offset].z = normal.z; } } void updateNormalWrapper( cudaGraphicsResource* &normalVboSrc, int massXNum, int massYNum ) { size_t vboSize; dim3 blockSize = dim3(16*16); dim3 gridSize = dim3( (massXNum + blockSize.x-1)/blockSize.x, (massYNum + blockSize.y-1)/blockSize.y ); cudaErrorCheck( hipGraphicsMapResources( 1, &normalVboSrc, 0 ) ); cudaErrorCheck( hipGraphicsResourceGetMappedPointer((void**) &d_normal, &vboSize, normalVboSrc ) ); hipLaunchKernelGGL(( updateNormalKernel), dim3(gridSize), dim3(blockSize) , 0, 0, (float3*)d_normal, d_pos, massXNum, massYNum ); cudaErrorCheck( hipGraphicsUnmapResources( 1, &normalVboSrc, 0 ) ); }
6d51665ded8caf91724aa0af70d29e3da251e5b8.cu
#include <stdio.h> #include "util.h" #include "kernel.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #define MASS .5f #define SHEAR_LEN 1.414213562f #define BEND_LEN 2.0f #define DAMPING 1.0f #define G 2.8f #define K 200.0f float* d_vertexData = 0; float* d_normal = 0; glm::vec3* d_vel = 0; glm::vec3* d_force = 0; glm::vec3* d_pos = 0; glm::vec3* d_pos2 = 0; __global__ void copyVertexDatakernel( glm::vec3* pos, glm::vec3* pos2, float4* vertexData, int massXNum, int massYNum ) { int2 idx; int offset; idx.x = blockIdx.x * blockDim.x + threadIdx.x; idx.y = blockIdx.y * blockDim.y + threadIdx.y; offset = idx.y * massXNum + idx.x; if( idx.x < massXNum && idx.y < massYNum ) { pos[offset].x = vertexData[ offset ].x; pos[offset].y = vertexData[ offset ].y; pos[offset].z = vertexData[ offset ].z; pos2[offset] = pos[offset]; } } int initCUDABuffer( cudaGraphicsResource* &vboResource, int massXNum, int massYNum ) { cudaErrorCheck( cudaMalloc((void**)&d_vel, massXNum * massYNum * sizeof( glm::vec3 ) ) ); cudaErrorCheck( cudaMalloc((void**)&d_force, massXNum * massYNum * sizeof( glm::vec3 ) ) ); cudaErrorCheck( cudaMalloc((void**)&d_pos, massXNum * massYNum * sizeof( glm::vec3 ) ) ); cudaErrorCheck( cudaMalloc((void**)&d_pos2, massXNum * massYNum * sizeof( glm::vec3 ) ) ); cudaErrorCheck( cudaMemset( (void*)d_vel, 0, massXNum * massYNum * sizeof( glm::vec3 ) ) ); cudaErrorCheck( cudaMemset( (void*)d_force, 0, massXNum * massYNum * sizeof( glm::vec3 ) ) ); //populate the pos buffer with data from VBO size_t vboSize; dim3 blockSize = dim3(16*16); dim3 gridSize = dim3( (massXNum + blockSize.x-1)/blockSize.x, (massYNum + blockSize.y-1)/blockSize.y ); cudaErrorCheck( cudaGraphicsMapResources( 1, &vboResource, 0 ) ); cudaErrorCheck( cudaGraphicsResourceGetMappedPointer((void**) &d_vertexData, &vboSize, vboResource ) ); copyVertexDatakernel<<<gridSize,blockSize>>>( d_pos, d_pos2, (float4*)d_vertexData, massXNum, massYNum ); cudaErrorCheck( 
cudaGraphicsUnmapResources( 1, &vboResource, 0 ) ); return 0; } int cleanupCUDA() { if( d_vel ) cudaFree( d_vel ); d_vel = 0; if( d_force ) cudaFree( d_force ); d_force = 0; if( d_pos ) cudaFree( d_pos ); d_pos = 0; if( d_pos2 ) cudaFree( d_pos2 ); d_pos2 = 0; return 0; } __global__ void updateVelKernel( float4* vertexData, glm::vec3* pos, glm::vec3* force, glm::vec3* vel, float2 restlen, int massXNum, int massYNum ) { int2 idx; int2 adj; int offset; glm::vec3 vec; idx.x = blockIdx.x * blockDim.x + threadIdx.x; idx.y = blockIdx.y * blockDim.y + threadIdx.y; offset = idx.y * massXNum + idx.x; glm::vec3 internalforce( 0.0f,0.0f,0.0f ); if( idx.x < massXNum && idx.y < massYNum ) { //internal force - stretch adj.x = idx.x-1; if( (adj.x) >= 0 ) { vec = pos[ offset - 1] - pos[ offset ]; internalforce += K * ( vec - restlen.x * glm::normalize( vec ) ) ; } adj.x = idx.x+1; if( ( adj.x ) < massXNum ) { vec = pos[ idx.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - restlen.x * glm::normalize( vec ) ); } adj.y = idx.y - 1; if( (adj.y) >= 0 ) { vec = pos[ adj.y * massXNum + idx.x] - pos[ offset ]; internalforce += K * ( vec - restlen.x * glm::normalize( vec ) ); } adj.y = idx.y+1; if( ( adj.y) < massYNum ) { vec = pos[ adj.y * massXNum + idx.x] - pos[ offset ]; internalforce += K * ( vec - restlen.x * glm::normalize( vec ) ); } //internal force - shear if( (adj.x = idx.x -1 ) >= 0 && (adj.y=idx.y-1) >=0 ) { vec = pos[ adj.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - SHEAR_LEN*restlen.x * glm::normalize( vec ) ); } if( (adj.x = idx.x +1 ) < massXNum && (adj.y=idx.y+1) < massYNum ) { vec = pos[ adj.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - SHEAR_LEN*restlen.x * glm::normalize( vec ) ); } if( (adj.x = idx.x +1 ) < massXNum && (adj.y=idx.y-1) >= 0) { vec = pos[ adj.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - SHEAR_LEN*restlen.x * glm::normalize( vec ) ); } if( (adj.x = idx.x -1 ) >= 0 && 
(adj.y=idx.y+1) < massYNum ) { vec = pos[ adj.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - SHEAR_LEN*restlen.x * glm::normalize( vec ) ); } //if( idx.x == 0 || idx.x == massXNum-1 || idx.y == 0 || idx.y == massYNum-1 ) //{ //internal force - bend if( (adj.x = idx.x-2) >= 0 ) { vec = pos[ idx.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - BEND_LEN*restlen.x * glm::normalize( vec ) ); } if( ( adj.x = idx.x+2 ) < massXNum ) { vec = pos[ idx.y * massXNum + adj.x] - pos[ offset ]; internalforce += K * ( vec - BEND_LEN*restlen.x * glm::normalize( vec ) ); } if( (adj.y = idx.y-2) >= 0 ) { vec = pos[ adj.y * massXNum + idx.x] - pos[ offset ]; internalforce += K * ( vec - BEND_LEN*restlen.x * glm::normalize( vec ) ); } if( ( adj.y = idx.y+2 ) < massYNum ) { vec = pos[ adj.y * massXNum + idx.x] - pos[ offset ]; internalforce += K * ( vec - BEND_LEN*restlen.x * glm::normalize( vec ) ); } // } force[offset] = internalforce; } } void updateVelWrapper( cudaGraphicsResource* &vboResource, float2 restlen, int massXNum, int massYNum ) { size_t vboSize; dim3 blockSize = dim3(16*16); dim3 gridSize = dim3( (massXNum + blockSize.x-1)/blockSize.x, (massYNum + blockSize.y-1)/blockSize.y ); cudaErrorCheck( cudaGraphicsMapResources( 1, &vboResource, 0 ) ); cudaErrorCheck( cudaGraphicsResourceGetMappedPointer((void**) &d_vertexData, &vboSize, vboResource ) ); updateVelKernel<<<gridSize,blockSize>>>( (float4*)d_vertexData, d_pos, d_force, d_vel, restlen, massXNum, massYNum ); cudaErrorCheck( cudaGraphicsUnmapResources( 1, &vboResource, 0 ) ); } __global__ void updatePosKernel( float4* vertexData, glm::vec3* pos, glm::vec3* pos2, glm::vec3* accel, float3* normals, float dt, float elapse, float windFactor, float2 restlen, int massXNum, int massYNum, glm::vec3 sphere, float radius) { int2 idx; int offset; float diff; glm::vec3 newPos; glm::vec3 totalForce; glm::vec3 windForce; idx.x = blockIdx.x * blockDim.x + threadIdx.x; idx.y = blockIdx.y * 
blockDim.y + threadIdx.y; offset = idx.y * massXNum + idx.x; glm::vec3 N; if( idx.x < massXNum && idx.y < massYNum ) { if( idx.y == 0 ) return; //add external forces //Gravity totalForce = accel[offset] + MASS * glm::vec3( 0, -G, 0 ); //Damping force totalForce -= DAMPING*(pos[offset] - pos2[offset])/dt; //wind //totalForce += glm::vec3( sinf(idx.x*idx.y*elapse),cosf(0*elapse), ); N.x = normals[offset].x; N.y = normals[offset].y; N.z = normals[offset].z; windForce = N * glm::dot( N, glm::vec3( 0.0f , 0.0f, sinf(cosf(5.0f*idx.x*idx.y*0*elapse) ) ) ) * windFactor ; totalForce += windForce; //Verlet Integration newPos = 2.0f*pos[offset] - pos2[offset] + (dt*dt*totalForce/MASS); //apply stretch constraints //if( idx.y > 1 ) //{ // diff = glm::distance( newPos, pos[ (idx.y-1)*(massXNum)+idx.x] ) - 1.1f * restlen.x; // if( glm::distance( newPos, pos[(idx.y-1)*(massXNum)+idx.x] ) > 1.1f * restlen.x ) // newPos = pos[(idx.y-1)*massXNum+idx.x] + glm::normalize( newPos - pos[(idx.y-1)*massXNum+idx.x] ) * 1.1f * restlen.x; //} //if( idx.x > 1 ) //{ // if( glm::distance( newPos, pos[idx.y*massXNum+idx.x-1] ) > 1.1f * restlen.x ) // newPos = pos[idx.y*massXNum+idx.x-1] + glm::normalize( newPos - pos[idx.y*massXNum+idx.x-1] ) * 1.1f * restlen.x; //} //if( idx.y < massYNum - 1 ) //{ // if( glm::distance( newPos, pos[(idx.y+1)*(massXNum) + idx.x] ) > 1.1f * restlen.x ) // newPos = pos[(idx.y+1)*(massXNum) + idx.x] + glm::normalize( newPos - pos[(idx.y+1)*(massXNum) + idx.x] ) * 1.1f * restlen.x; //} //if( idx.x < massXNum - 1 ) //{ // if( glm::distance( newPos, pos[idx.y*massXNum+idx.x+1] ) > 1.1f * restlen.x ) // newPos = pos[idx.y*massXNum+idx.x+1] + glm::normalize( newPos - pos[idx.y*massXNum+idx.x+1] ) * 1.1f * restlen.x; //} if( glm::distance( newPos, sphere ) < radius+0.05f ) newPos = sphere + glm::normalize( newPos-sphere) * (radius+0.05f); pos2[offset] = pos[offset]; pos[offset] = newPos; } syncthreads(); if(idx.x < massXNum && idx.y < massYNum) { vertexData[ offset ].x = 
newPos.x; vertexData[ offset ].y = newPos.y; vertexData[ offset ].z = newPos.z; } } __global__ void updateInterPosKernel( float4* vertexData, glm::vec3* pos, int massXNum, int massYNum ) { int2 idx; int offset; idx.x = blockIdx.x * blockDim.x + threadIdx.x; idx.y = blockIdx.y * blockDim.y + threadIdx.y; offset = idx.y * massXNum + idx.x; if( idx.x < massXNum && idx.y < massYNum ) { pos[offset].x = vertexData[offset].x; pos[offset].y = vertexData[offset].y; pos[offset].z = vertexData[offset].z; } } void updatePosWrapper( cudaGraphicsResource* &vboResource, cudaGraphicsResource* normalVboSrc, float dt, float elapse, float windFactor, float2 restlen, int massXNum, int massYNum, glm::vec3 &sphere, float radius ) { size_t vboSize; dim3 blockSize = dim3(16*16); dim3 gridSize = dim3( (massXNum + blockSize.x-1)/blockSize.x, (massYNum + blockSize.y-1)/blockSize.y ); cudaErrorCheck( cudaGraphicsMapResources( 1, &vboResource, 0 ) ); cudaErrorCheck( cudaGraphicsResourceGetMappedPointer((void**) &d_vertexData, &vboSize, vboResource ) ); cudaErrorCheck( cudaGraphicsMapResources( 1, &normalVboSrc, 0 ) ); cudaErrorCheck( cudaGraphicsResourceGetMappedPointer((void**) &d_normal, &vboSize, normalVboSrc ) ); updatePosKernel<<<gridSize,blockSize>>>( (float4*)d_vertexData,d_pos, d_pos2, d_force, (float3*)d_normal, dt, elapse, windFactor, restlen, massXNum, massYNum, sphere, radius ); updateInterPosKernel<<<gridSize,blockSize>>>( (float4*)d_vertexData, d_pos, massXNum, massYNum ); cudaErrorCheck( cudaGraphicsUnmapResources( 1, &vboResource, 0 ) ); cudaErrorCheck( cudaGraphicsUnmapResources( 1, &normalVboSrc, 0 ) ); } __global__ void updateNormalKernel( float3* normals, glm::vec3* pos, int massXNum, int massYNum ) { int2 idx; int offset; glm::vec3 normal(0.0f, 0.0f, 1.0f ); idx.x = blockIdx.x * blockDim.x + threadIdx.x; idx.y = blockIdx.y * blockDim.y + threadIdx.y; offset = idx.y * massXNum + idx.x; if( idx.x < massXNum-1 && idx.y < massYNum-1 ) { normal = glm::cross( pos[offset+ 
massXNum] - pos[offset], pos[offset+1] - pos[offset] ); normal = glm::normalize( normal ); } else if( idx.x == massXNum - 1 && idx.y < massYNum-1 ) { normal = glm::cross( pos[offset-1]-pos[offset] , pos[offset+ massXNum ] - pos[offset] ); normal = glm::normalize( normal ); } else if( idx.y == massYNum - 1 && idx.x< massXNum-1 ) { normal = glm::cross( pos[offset+1] - pos[offset], pos[offset- massXNum ] - pos[offset] ); normal = glm::normalize( normal ); } else if( idx.x == massXNum-1 && idx.y == massYNum-1 ) { normal = glm::cross( pos[offset-massXNum]-pos[offset], pos[offset- 1 ]- pos[offset] ); normal = glm::normalize( normal ); } if( idx.x < massXNum && idx.y < massYNum ) { normals[offset].x = normal.x; normals[offset].y = normal.y; normals[offset].z = normal.z; } } void updateNormalWrapper( cudaGraphicsResource* &normalVboSrc, int massXNum, int massYNum ) { size_t vboSize; dim3 blockSize = dim3(16*16); dim3 gridSize = dim3( (massXNum + blockSize.x-1)/blockSize.x, (massYNum + blockSize.y-1)/blockSize.y ); cudaErrorCheck( cudaGraphicsMapResources( 1, &normalVboSrc, 0 ) ); cudaErrorCheck( cudaGraphicsResourceGetMappedPointer((void**) &d_normal, &vboSize, normalVboSrc ) ); updateNormalKernel<<< gridSize, blockSize >>>( (float3*)d_normal, d_pos, massXNum, massYNum ); cudaErrorCheck( cudaGraphicsUnmapResources( 1, &normalVboSrc, 0 ) ); }
01b5dc4d3e2000916c52478e299b8235e1856025.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination< float, 1, int32_t, float, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, true, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
01b5dc4d3e2000916c52478e299b8235e1856025.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination< float, 1, int32_t, float, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, true, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
9fa9f0a5a8eca7e8749805af019cf70713fa1a7d.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include <algorithm> #include <random> #include <chrono> #include <sstream> #include <exception> #include <string> #include <cstddef> #include <iomanip> #include <hip/hip_runtime.h> #include <thrust/scan.h> #include "../helpers.hpp" namespace cpu { template <class T> void inclusive_scan(const T* RESTRICT input, T* RESTRICT output, int N) { output[0] = input[0]; for (int i = 1; i < N; i++) output[i] = output[i - 1] + input[i]; } template <class T> void inclusive_scan_stl(const T* RESTRICT input, T* RESTRICT output, int N) { std::inclusive_scan(input, input + N, output); } template <class T> void inclusive_scan_parallel(const T* RESTRICT input, T* RESTRICT output, int N) { constexpr int ELEMENTS_PER_ITERATION = 1024; auto blelloch_scan_power_of_2 = [](T* buffer) { // Up-Sweep Step: // indices: 0 1 2 3 4 5 6 7 // input: 1 2 3 4 5 6 7 8 // skip-2: 3 7 11 15 // skip-4: 10 26 // skip-8: 36 // up-sweeep: 1 3 3 10 5 11 7 36 // // in skip-K, we access [K, K * 2, K * 3, ...] 
until we exceed N // in skip-K, the children of 'i' is 'i' and 'i' - K / 2 for (int skip = 2; skip <= ELEMENTS_PER_ITERATION; skip *= 2) for (int i = skip - 1; i < ELEMENTS_PER_ITERATION; i += skip) buffer[i] += buffer[i - skip / 2]; auto total = buffer[ELEMENTS_PER_ITERATION - 1]; buffer[ELEMENTS_PER_ITERATION - 1] = 0; // Down-Sweep Step (Exclusive Scan): // N = current node, LC(X) = left child of x, RC(X) = right child of X, M(X) = mirror node of X in the up-sweep tree // // New LC = N // New RC = N + LC(M(N)) // // Trick 1: mirror node sits at the same index in the input // Trick 2: in skip-N, the right child is at the same index and left child is K indices behind // Trick 3: if you go a depth, you have to use skip-(K/2) for children; for example, LC(M(N)) is skip-(K/2) from M(N) // // In summary, to recursively update, read N, save LC(M(N)) (K / 2 indices behind), update the LC(N) and then RC(N) (same index as N). // We save LC because it shares the index with LC(M(N))) which is required to compute RC(N). 
// // indices: 0 1 2 3 4 5 6 7 // input: 1 3 3 10 5 11 7 36 // preprocess: 1 3 3 10 5 11 7 0 // skip-8: 0 10 (N = 0, LC(M(N)) = 10) // skip-4: 0 3 10 21 // skip-2: 0 1 3 6 10 15 21 28 for(int skip = ELEMENTS_PER_ITERATION; skip > 1; skip /= 2) { for (int i = skip - 1; i < ELEMENTS_PER_ITERATION; i += skip) { auto current = buffer[i]; auto lc_mirror = buffer[i - skip / 2]; buffer[i - skip / 2] = current; buffer[i] = current + lc_mirror; } } // convert to inclusive scan for (int i = 1; i < ELEMENTS_PER_ITERATION; i++) buffer[i - 1] = buffer[i]; buffer[ELEMENTS_PER_ITERATION - 1] = total; return total; }; std::vector<T> sub_totals; int num_full_iterations = N / ELEMENTS_PER_ITERATION; for (int iter = 0; iter < num_full_iterations; iter++) { auto input_start = input + iter * ELEMENTS_PER_ITERATION; auto output_start = output + iter * ELEMENTS_PER_ITERATION; for (int i = 0; i < ELEMENTS_PER_ITERATION; i++) output_start[i] = input_start[i]; auto sub_total = blelloch_scan_power_of_2(output_start); sub_totals.push_back(sub_total); } if (num_full_iterations * ELEMENTS_PER_ITERATION < N) { // scan trailing block auto starting_offset = num_full_iterations * ELEMENTS_PER_ITERATION; auto ending_offset = N; auto range_size = ending_offset - starting_offset; auto input_start = input + starting_offset; auto output_start = output + starting_offset; output_start[0] = input_start[0]; for (int i = 1; i < range_size; i++) output_start[i] = output_start[i - 1] + input_start[i]; sub_totals.push_back(output_start[range_size - 1]); } if (sub_totals.size() > 1) { inclusive_scan_parallel(sub_totals.data(), sub_totals.data(), sub_totals.size()); for (int iter = 1; iter <= sub_totals.size(); iter++) { auto residual = sub_totals[iter - 1]; auto starting_offset = iter * ELEMENTS_PER_ITERATION; auto ending_offset = min(N, starting_offset + ELEMENTS_PER_ITERATION); auto range_size = ending_offset - starting_offset; auto input_start = input + starting_offset; auto output_start = output + 
starting_offset; for (int i = 0; i < range_size; i++) output_start[i] += residual; } } } } namespace thrust_gpu { template <class T> void inclusive_scan(const T* CUDA_RESTRICT input, T* CUDA_RESTRICT output, int N) { thrust::plus<T> plus; thrust::inclusive_scan(thrust::device, input, input + N, output, plus); } } namespace gpu { template <class T> __global__ void hillis_steele_scan(const T* CUDA_RESTRICT input, T* CUDA_RESTRICT buffer, T* CUDA_RESTRICT output, int N) { const T* buf_in = input; T* buf_out = buffer; for (int scanned_till = 1; scanned_till < N; scanned_till *= 2) { #pragma unroll 8 for (int i = threadIdx.x; i < N; i += blockDim.x) { if (i < scanned_till) buf_out[i] = buf_in[i]; else buf_out[i] = buf_in[i] + buf_in[i - scanned_till]; } __syncthreads(); if (buf_out == buffer) { buf_in = buffer; buf_out = output; } else { buf_in = output; buf_out = buffer; } } if (buf_in != output) { for (int i = threadIdx.x; i < N; i += blockDim.x) output[i] = buffer[i]; } } template <class T> void inclusive_scan_v1(const T* d_input, T* d_output, int N) { T* buffer; CHECK_CUDA(hipMalloc(&buffer, N * sizeof(T))); hipLaunchKernelGGL(( gpu::hillis_steele_scan), dim3(1), dim3(1024), 0, 0, d_input, buffer, d_output, N); CHECK_CUDA(hipGetLastError()); CHECK_CUDA(hipFree(buffer)); } template <class T, int BLOCK_SIZE, int ELEMENTS_PER_ITERATION> __global__ __launch_bounds__(BLOCK_SIZE) void blelloch_scan_power_of_2(const T* input, T* sub_totals, T* output, int N) { auto starting_offset = blockIdx.x * ELEMENTS_PER_ITERATION; auto ending_offset = min(N, starting_offset + ELEMENTS_PER_ITERATION); auto range_size = ending_offset - starting_offset; input += starting_offset; output += starting_offset; constexpr int NUM_BANKS = 32; __shared__ T buffer[ELEMENTS_PER_ITERATION + ELEMENTS_PER_ITERATION / NUM_BANKS]; auto access_buffer = [](int idx) -> T& { return buffer[idx + (idx / NUM_BANKS)]; }; for (int i = threadIdx.x; i < ELEMENTS_PER_ITERATION; i += BLOCK_SIZE) access_buffer(i) = 
(i < range_size) ? input[i] : 0; // Up-Sweep Step: // indices: 0 1 2 3 4 5 6 7 // input: 1 2 3 4 5 6 7 8 // skip-2: 3 7 11 15 // skip-4: 10 26 // skip-8: 36 // up-sweeep: 1 3 3 10 5 11 7 36 // // in skip-K, we access [K, K * 2, K * 3, ...] until we exceed N // in skip-K, the children of 'i' is 'i' and 'i' - K / 2 #pragma unroll for (int skip = 2; skip <= ELEMENTS_PER_ITERATION; skip *= 2) { __syncthreads(); for (int i = threadIdx.x; i < ELEMENTS_PER_ITERATION / skip; i += BLOCK_SIZE) { auto idx = (i + 1) * skip - 1; access_buffer(idx) += access_buffer(idx - skip / 2); } } T total = 0; if (threadIdx.x == 0) { total = access_buffer(ELEMENTS_PER_ITERATION - 1); access_buffer(ELEMENTS_PER_ITERATION - 1) = 0; } // Down-Sweep Step (Exclusive Scan): // N = current node, LC(X) = left child of x, RC(X) = right child of X, M(X) = mirror node of X in the up-sweep tree // // New LC = N // New RC = N + LC(M(N)) // // Trick 1: mirror node sits at the same index in the input // Trick 2: in skip-N, the right child is at the same index and left child is K indices behind // Trick 3: if you go a depth, you have to use skip-(K/2) for children; for example, LC(M(N)) is skip-(K/2) from M(N) // // In summary, to recursively update, read N, save LC(M(N)) (K / 2 indices behind), update the LC(N) and then RC(N) (same index as N). // We save LC because it shares the index with LC(M(N))) which is required to compute RC(N). 
// // indices: 0 1 2 3 4 5 6 7 // input: 1 3 3 10 5 11 7 36 // preprocess: 1 3 3 10 5 11 7 0 // skip-8: 0 10 (N = 0, LC(M(N)) = 10) // skip-4: 0 3 10 21 // skip-2: 0 1 3 6 10 15 21 28 #pragma unroll for(int skip = ELEMENTS_PER_ITERATION; skip > 1; skip /= 2) { __syncthreads(); #pragma unroll for (int i = threadIdx.x; i < ELEMENTS_PER_ITERATION / skip; i += BLOCK_SIZE) { auto idx = (i + 1) * skip - 1; auto current = access_buffer(idx); auto lc_mirror = access_buffer(idx - skip / 2); access_buffer(idx - skip / 2) = current; access_buffer(idx) = current + lc_mirror; } } __syncthreads(); // convert to inclusive scan for (int i = threadIdx.x; i < range_size - 1; i += BLOCK_SIZE) output[i] = access_buffer(i + 1); if (threadIdx.x == 0) { output[range_size - 1] = total; if (sub_totals) sub_totals[blockIdx.x] = total; } } template <class T, int BLOCK_SIZE, int ELEMENTS_PER_ITERATION> __global__ void correct_block_scans(const T* input, const T* sub_totals, T* output, int N) { auto starting_offset = (blockIdx.x + 1) * ELEMENTS_PER_ITERATION; auto ending_offset = min(N, starting_offset + ELEMENTS_PER_ITERATION); auto range_size = ending_offset - starting_offset; input += starting_offset; output += starting_offset; auto residual = sub_totals[blockIdx.x]; for (int i = threadIdx.x; i < range_size; i += BLOCK_SIZE) output[i] += residual; } template <class T> void inclusive_scan_v2(const T* input, T* output, int N) { constexpr int BLOCK_SIZE = 64; constexpr int ELEMENTS_PER_ITERATION = 512; int num_iterations = (N + ELEMENTS_PER_ITERATION - 1) / ELEMENTS_PER_ITERATION; T* sub_totals = nullptr; if (num_iterations > 1) { CHECK_CUDA(hipMalloc(&sub_totals, num_iterations * sizeof(T))); } hipLaunchKernelGGL(( blelloch_scan_power_of_2<T, BLOCK_SIZE, ELEMENTS_PER_ITERATION>), dim3(num_iterations), dim3(BLOCK_SIZE), 0, 0, input, sub_totals, output, N); CHECK_CUDA(hipGetLastError()); if (num_iterations > 1) { inclusive_scan_v2(sub_totals, sub_totals, num_iterations); hipLaunchKernelGGL(( 
correct_block_scans<T, BLOCK_SIZE, ELEMENTS_PER_ITERATION>), dim3(num_iterations - 1), dim3(BLOCK_SIZE), 0, 0, input, sub_totals, output, N); CHECK_CUDA(hipGetLastError()); CHECK_CUDA(hipDeviceSynchronize()); CHECK_CUDA(hipFree(sub_totals)); } } } int main () { using T = double; const int N = 10'000'000; constexpr float THRESHOLD = 0.001; std::vector<T> input(N); random_fill(std::begin(input), std::end(input)); T* d_input, *d_output; CHECK_CUDA(hipMalloc(&d_input, input.size() * sizeof(T))); CHECK_CUDA(hipMalloc(&d_output, input.size() * sizeof(T))); std::vector<T> output_cpu(N); auto cpu_time = benchmark([&input, &output_cpu, N] { cpu::inclusive_scan_parallel(input.data(), output_cpu.data(), N); }); std::cout << "[CPU] Running time: " << to_milliseconds(cpu_time).count() << "ms\n"; std::cout << std::endl; std::vector<T> output_thrust(N); auto thrust_gpu_time = benchmark([&] { CHECK_CUDA(hipMemcpy(d_input, input.data(), input.size() * sizeof(T), hipMemcpyHostToDevice)); thrust_gpu::inclusive_scan(d_input, d_output, N); CHECK_CUDA(hipGetLastError()); CHECK_CUDA(hipMemcpy(output_thrust.data(), d_output, output_thrust.size() * sizeof(T), hipMemcpyDeviceToHost)); }); std::cout << "[thrust] Running time (incl. memory copy): " << to_milliseconds(thrust_gpu_time).count() << "ms\n"; print_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_thrust), THRESHOLD); std::cout << std::endl; // erase previous results CHECK_CUDA(hipMemset(d_output, 0, input.size() * sizeof(T))); std::vector<T> output_gpu(N); auto gpu_time = benchmark([&] { CHECK_CUDA(hipMemcpy(d_input, input.data(), input.size() * sizeof(T), hipMemcpyHostToDevice)); gpu::inclusive_scan_v2(d_input, d_output, N); CHECK_CUDA(hipMemcpy(output_gpu.data(), d_output, output_gpu.size() * sizeof(T), hipMemcpyDeviceToHost)); }); std::cout << "[GPU] Running time (incl. 
memory copy): " << to_milliseconds(gpu_time).count() << "ms" << std::endl; print_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_gpu), THRESHOLD); CHECK_CUDA(hipFree(d_input)); CHECK_CUDA(hipFree(d_output)); return 0; }
9fa9f0a5a8eca7e8749805af019cf70713fa1a7d.cu
#include <iostream> #include <vector> #include <algorithm> #include <random> #include <chrono> #include <sstream> #include <exception> #include <string> #include <cstddef> #include <iomanip> #include <cuda_runtime.h> #include <thrust/scan.h> #include "../helpers.hpp" namespace cpu { template <class T> void inclusive_scan(const T* RESTRICT input, T* RESTRICT output, int N) { output[0] = input[0]; for (int i = 1; i < N; i++) output[i] = output[i - 1] + input[i]; } template <class T> void inclusive_scan_stl(const T* RESTRICT input, T* RESTRICT output, int N) { std::inclusive_scan(input, input + N, output); } template <class T> void inclusive_scan_parallel(const T* RESTRICT input, T* RESTRICT output, int N) { constexpr int ELEMENTS_PER_ITERATION = 1024; auto blelloch_scan_power_of_2 = [](T* buffer) { // Up-Sweep Step: // indices: 0 1 2 3 4 5 6 7 // input: 1 2 3 4 5 6 7 8 // skip-2: 3 7 11 15 // skip-4: 10 26 // skip-8: 36 // up-sweeep: 1 3 3 10 5 11 7 36 // // in skip-K, we access [K, K * 2, K * 3, ...] until we exceed N // in skip-K, the children of 'i' is 'i' and 'i' - K / 2 for (int skip = 2; skip <= ELEMENTS_PER_ITERATION; skip *= 2) for (int i = skip - 1; i < ELEMENTS_PER_ITERATION; i += skip) buffer[i] += buffer[i - skip / 2]; auto total = buffer[ELEMENTS_PER_ITERATION - 1]; buffer[ELEMENTS_PER_ITERATION - 1] = 0; // Down-Sweep Step (Exclusive Scan): // N = current node, LC(X) = left child of x, RC(X) = right child of X, M(X) = mirror node of X in the up-sweep tree // // New LC = N // New RC = N + LC(M(N)) // // Trick 1: mirror node sits at the same index in the input // Trick 2: in skip-N, the right child is at the same index and left child is K indices behind // Trick 3: if you go a depth, you have to use skip-(K/2) for children; for example, LC(M(N)) is skip-(K/2) from M(N) // // In summary, to recursively update, read N, save LC(M(N)) (K / 2 indices behind), update the LC(N) and then RC(N) (same index as N). 
// We save LC because it shares the index with LC(M(N))) which is required to compute RC(N). // // indices: 0 1 2 3 4 5 6 7 // input: 1 3 3 10 5 11 7 36 // preprocess: 1 3 3 10 5 11 7 0 // skip-8: 0 10 (N = 0, LC(M(N)) = 10) // skip-4: 0 3 10 21 // skip-2: 0 1 3 6 10 15 21 28 for(int skip = ELEMENTS_PER_ITERATION; skip > 1; skip /= 2) { for (int i = skip - 1; i < ELEMENTS_PER_ITERATION; i += skip) { auto current = buffer[i]; auto lc_mirror = buffer[i - skip / 2]; buffer[i - skip / 2] = current; buffer[i] = current + lc_mirror; } } // convert to inclusive scan for (int i = 1; i < ELEMENTS_PER_ITERATION; i++) buffer[i - 1] = buffer[i]; buffer[ELEMENTS_PER_ITERATION - 1] = total; return total; }; std::vector<T> sub_totals; int num_full_iterations = N / ELEMENTS_PER_ITERATION; for (int iter = 0; iter < num_full_iterations; iter++) { auto input_start = input + iter * ELEMENTS_PER_ITERATION; auto output_start = output + iter * ELEMENTS_PER_ITERATION; for (int i = 0; i < ELEMENTS_PER_ITERATION; i++) output_start[i] = input_start[i]; auto sub_total = blelloch_scan_power_of_2(output_start); sub_totals.push_back(sub_total); } if (num_full_iterations * ELEMENTS_PER_ITERATION < N) { // scan trailing block auto starting_offset = num_full_iterations * ELEMENTS_PER_ITERATION; auto ending_offset = N; auto range_size = ending_offset - starting_offset; auto input_start = input + starting_offset; auto output_start = output + starting_offset; output_start[0] = input_start[0]; for (int i = 1; i < range_size; i++) output_start[i] = output_start[i - 1] + input_start[i]; sub_totals.push_back(output_start[range_size - 1]); } if (sub_totals.size() > 1) { inclusive_scan_parallel(sub_totals.data(), sub_totals.data(), sub_totals.size()); for (int iter = 1; iter <= sub_totals.size(); iter++) { auto residual = sub_totals[iter - 1]; auto starting_offset = iter * ELEMENTS_PER_ITERATION; auto ending_offset = min(N, starting_offset + ELEMENTS_PER_ITERATION); auto range_size = ending_offset - 
starting_offset; auto input_start = input + starting_offset; auto output_start = output + starting_offset; for (int i = 0; i < range_size; i++) output_start[i] += residual; } } } } namespace thrust_gpu { template <class T> void inclusive_scan(const T* CUDA_RESTRICT input, T* CUDA_RESTRICT output, int N) { thrust::plus<T> plus; thrust::inclusive_scan(thrust::device, input, input + N, output, plus); } } namespace gpu { template <class T> __global__ void hillis_steele_scan(const T* CUDA_RESTRICT input, T* CUDA_RESTRICT buffer, T* CUDA_RESTRICT output, int N) { const T* buf_in = input; T* buf_out = buffer; for (int scanned_till = 1; scanned_till < N; scanned_till *= 2) { #pragma unroll 8 for (int i = threadIdx.x; i < N; i += blockDim.x) { if (i < scanned_till) buf_out[i] = buf_in[i]; else buf_out[i] = buf_in[i] + buf_in[i - scanned_till]; } __syncthreads(); if (buf_out == buffer) { buf_in = buffer; buf_out = output; } else { buf_in = output; buf_out = buffer; } } if (buf_in != output) { for (int i = threadIdx.x; i < N; i += blockDim.x) output[i] = buffer[i]; } } template <class T> void inclusive_scan_v1(const T* d_input, T* d_output, int N) { T* buffer; CHECK_CUDA(cudaMalloc(&buffer, N * sizeof(T))); gpu::hillis_steele_scan<<<1, 1024>>>(d_input, buffer, d_output, N); CHECK_CUDA(cudaGetLastError()); CHECK_CUDA(cudaFree(buffer)); } template <class T, int BLOCK_SIZE, int ELEMENTS_PER_ITERATION> __global__ __launch_bounds__(BLOCK_SIZE) void blelloch_scan_power_of_2(const T* input, T* sub_totals, T* output, int N) { auto starting_offset = blockIdx.x * ELEMENTS_PER_ITERATION; auto ending_offset = min(N, starting_offset + ELEMENTS_PER_ITERATION); auto range_size = ending_offset - starting_offset; input += starting_offset; output += starting_offset; constexpr int NUM_BANKS = 32; __shared__ T buffer[ELEMENTS_PER_ITERATION + ELEMENTS_PER_ITERATION / NUM_BANKS]; auto access_buffer = [](int idx) -> T& { return buffer[idx + (idx / NUM_BANKS)]; }; for (int i = threadIdx.x; i < 
ELEMENTS_PER_ITERATION; i += BLOCK_SIZE) access_buffer(i) = (i < range_size) ? input[i] : 0; // Up-Sweep Step: // indices: 0 1 2 3 4 5 6 7 // input: 1 2 3 4 5 6 7 8 // skip-2: 3 7 11 15 // skip-4: 10 26 // skip-8: 36 // up-sweeep: 1 3 3 10 5 11 7 36 // // in skip-K, we access [K, K * 2, K * 3, ...] until we exceed N // in skip-K, the children of 'i' is 'i' and 'i' - K / 2 #pragma unroll for (int skip = 2; skip <= ELEMENTS_PER_ITERATION; skip *= 2) { __syncthreads(); for (int i = threadIdx.x; i < ELEMENTS_PER_ITERATION / skip; i += BLOCK_SIZE) { auto idx = (i + 1) * skip - 1; access_buffer(idx) += access_buffer(idx - skip / 2); } } T total = 0; if (threadIdx.x == 0) { total = access_buffer(ELEMENTS_PER_ITERATION - 1); access_buffer(ELEMENTS_PER_ITERATION - 1) = 0; } // Down-Sweep Step (Exclusive Scan): // N = current node, LC(X) = left child of x, RC(X) = right child of X, M(X) = mirror node of X in the up-sweep tree // // New LC = N // New RC = N + LC(M(N)) // // Trick 1: mirror node sits at the same index in the input // Trick 2: in skip-N, the right child is at the same index and left child is K indices behind // Trick 3: if you go a depth, you have to use skip-(K/2) for children; for example, LC(M(N)) is skip-(K/2) from M(N) // // In summary, to recursively update, read N, save LC(M(N)) (K / 2 indices behind), update the LC(N) and then RC(N) (same index as N). // We save LC because it shares the index with LC(M(N))) which is required to compute RC(N). 
// // indices: 0 1 2 3 4 5 6 7 // input: 1 3 3 10 5 11 7 36 // preprocess: 1 3 3 10 5 11 7 0 // skip-8: 0 10 (N = 0, LC(M(N)) = 10) // skip-4: 0 3 10 21 // skip-2: 0 1 3 6 10 15 21 28 #pragma unroll for(int skip = ELEMENTS_PER_ITERATION; skip > 1; skip /= 2) { __syncthreads(); #pragma unroll for (int i = threadIdx.x; i < ELEMENTS_PER_ITERATION / skip; i += BLOCK_SIZE) { auto idx = (i + 1) * skip - 1; auto current = access_buffer(idx); auto lc_mirror = access_buffer(idx - skip / 2); access_buffer(idx - skip / 2) = current; access_buffer(idx) = current + lc_mirror; } } __syncthreads(); // convert to inclusive scan for (int i = threadIdx.x; i < range_size - 1; i += BLOCK_SIZE) output[i] = access_buffer(i + 1); if (threadIdx.x == 0) { output[range_size - 1] = total; if (sub_totals) sub_totals[blockIdx.x] = total; } } template <class T, int BLOCK_SIZE, int ELEMENTS_PER_ITERATION> __global__ void correct_block_scans(const T* input, const T* sub_totals, T* output, int N) { auto starting_offset = (blockIdx.x + 1) * ELEMENTS_PER_ITERATION; auto ending_offset = min(N, starting_offset + ELEMENTS_PER_ITERATION); auto range_size = ending_offset - starting_offset; input += starting_offset; output += starting_offset; auto residual = sub_totals[blockIdx.x]; for (int i = threadIdx.x; i < range_size; i += BLOCK_SIZE) output[i] += residual; } template <class T> void inclusive_scan_v2(const T* input, T* output, int N) { constexpr int BLOCK_SIZE = 64; constexpr int ELEMENTS_PER_ITERATION = 512; int num_iterations = (N + ELEMENTS_PER_ITERATION - 1) / ELEMENTS_PER_ITERATION; T* sub_totals = nullptr; if (num_iterations > 1) { CHECK_CUDA(cudaMalloc(&sub_totals, num_iterations * sizeof(T))); } blelloch_scan_power_of_2<T, BLOCK_SIZE, ELEMENTS_PER_ITERATION><<<num_iterations, BLOCK_SIZE>>>(input, sub_totals, output, N); CHECK_CUDA(cudaGetLastError()); if (num_iterations > 1) { inclusive_scan_v2(sub_totals, sub_totals, num_iterations); correct_block_scans<T, BLOCK_SIZE, 
ELEMENTS_PER_ITERATION><<<num_iterations - 1, BLOCK_SIZE>>>(input, sub_totals, output, N); CHECK_CUDA(cudaGetLastError()); CHECK_CUDA(cudaDeviceSynchronize()); CHECK_CUDA(cudaFree(sub_totals)); } } } int main () { using T = double; const int N = 10'000'000; constexpr float THRESHOLD = 0.001; std::vector<T> input(N); random_fill(std::begin(input), std::end(input)); T* d_input, *d_output; CHECK_CUDA(cudaMalloc(&d_input, input.size() * sizeof(T))); CHECK_CUDA(cudaMalloc(&d_output, input.size() * sizeof(T))); std::vector<T> output_cpu(N); auto cpu_time = benchmark([&input, &output_cpu, N] { cpu::inclusive_scan_parallel(input.data(), output_cpu.data(), N); }); std::cout << "[CPU] Running time: " << to_milliseconds(cpu_time).count() << "ms\n"; std::cout << std::endl; std::vector<T> output_thrust(N); auto thrust_gpu_time = benchmark([&] { CHECK_CUDA(cudaMemcpy(d_input, input.data(), input.size() * sizeof(T), cudaMemcpyHostToDevice)); thrust_gpu::inclusive_scan(d_input, d_output, N); CHECK_CUDA(cudaGetLastError()); CHECK_CUDA(cudaMemcpy(output_thrust.data(), d_output, output_thrust.size() * sizeof(T), cudaMemcpyDeviceToHost)); }); std::cout << "[thrust] Running time (incl. memory copy): " << to_milliseconds(thrust_gpu_time).count() << "ms\n"; print_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_thrust), THRESHOLD); std::cout << std::endl; // erase previous results CHECK_CUDA(cudaMemset(d_output, 0, input.size() * sizeof(T))); std::vector<T> output_gpu(N); auto gpu_time = benchmark([&] { CHECK_CUDA(cudaMemcpy(d_input, input.data(), input.size() * sizeof(T), cudaMemcpyHostToDevice)); gpu::inclusive_scan_v2(d_input, d_output, N); CHECK_CUDA(cudaMemcpy(output_gpu.data(), d_output, output_gpu.size() * sizeof(T), cudaMemcpyDeviceToHost)); }); std::cout << "[GPU] Running time (incl. 
memory copy): " << to_milliseconds(gpu_time).count() << "ms" << std::endl; print_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_gpu), THRESHOLD); CHECK_CUDA(cudaFree(d_input)); CHECK_CUDA(cudaFree(d_output)); return 0; }
c67fbae7a659eb40b2f2065a917fa5e2356510cc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#include <cfloat>

/*
 * Find BLANK and replace your own code.
 * And submit report why do you replace the blank that way.
 */
/* 2015004693_YangSangheon */

#define TILE_WIDTH 24 /* set TILE_WIDTH 16 for the evaluation! */
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"

using namespace std;

// Max-pooling over non-overlapping filter_size x filter_size windows.
// input : vectorized (row-major) input_size x input_size matrix
// output : vectorized (input_size/filter_size)^2 result matrix
// input_size : width and height of the input matrix
// filter_size : side length of each pooling window
// One thread computes one output cell; launch with a 2D grid covering at
// least (input_size/filter_size) x (input_size/filter_size) threads.
__global__ void maxpool(float *input, float *output, const int input_size, const int filter_size)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;

    const int output_size = input_size / filter_size;

    // Out-of-bound guard BEFORE any load: the original guarded only the
    // store, so excess threads still performed out-of-bounds global reads.
    if (col >= output_size || row >= output_size) return;

    // Start from the smallest representable float rather than the arbitrary
    // double-literal sentinel -999999.9, so windows whose values are all very
    // negative still pool correctly (and the comparison stays in float).
    float Max = -FLT_MAX;
    for (int i = 0; i < filter_size; i++) {
        for (int j = 0; j < filter_size; j++) {
            // (row, col) select the window; j walks rows, i walks columns
            float tmp = input[(input_size * filter_size * row) + (filter_size * col) + (input_size * j) + i];
            if (Max < tmp) Max = tmp;
        }
    }

    output[(output_size * row) + col] = Max;
}
c67fbae7a659eb40b2f2065a917fa5e2356510cc.cu
#include "includes.h"
#include <cfloat>

/*
 * Find BLANK and replace your own code.
 * And submit report why do you replace the blank that way.
 */
/* 2015004693_YangSangheon */

#define TILE_WIDTH 24 /* set TILE_WIDTH 16 for the evaluation! */
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"

using namespace std;

// Max-pooling over non-overlapping filter_size x filter_size windows.
// input : vectorized (row-major) input_size x input_size matrix
// output : vectorized (input_size/filter_size)^2 result matrix
// input_size : width and height of the input matrix
// filter_size : side length of each pooling window
// One thread computes one output cell; launch with a 2D grid covering at
// least (input_size/filter_size) x (input_size/filter_size) threads.
__global__ void maxpool(float *input, float *output, const int input_size, const int filter_size)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;

    const int output_size = input_size / filter_size;

    // Out-of-bound guard BEFORE any load: the original guarded only the
    // store, so excess threads still performed out-of-bounds global reads.
    if (col >= output_size || row >= output_size) return;

    // Start from the smallest representable float rather than the arbitrary
    // double-literal sentinel -999999.9, so windows whose values are all very
    // negative still pool correctly (and the comparison stays in float).
    float Max = -FLT_MAX;
    for (int i = 0; i < filter_size; i++) {
        for (int j = 0; j < filter_size; j++) {
            // (row, col) select the window; j walks rows, i walks columns
            float tmp = input[(input_size * filter_size * row) + (filter_size * col) + (input_size * j) + i];
            if (Max < tmp) Max = tmp;
        }
    }

    output[(output_size * row) + col] = Max;
}
21737a51bb586924fbd34ad976d16f90f50fe72c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 Gregory Meyer
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use, copy,
// modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice (including
// the next paragraph) shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.

#include <simt_tf/simt_tf.h>

#include "err.cuh"

#include <cassert>
#include <cmath>
#include <atomic>
#include <mutex>

namespace {

// True iff any spatial component (x, y, z) of the point is NaN.
// The packed color stored in v[3] is deliberately not inspected.
__host__ __device__ bool isnan(const sl::float4 &v) noexcept {
    return std::isnan(v[0]) || std::isnan(v[1]) || std::isnan(v[2]);
}

// Bit-reinterprets a float as a 32-bit unsigned integer; used to recover the
// color that the ZED SDK packs into the fourth float lane of each point.
// NOTE(review): relies on union type punning — formally UB in ISO C++;
// confirm the toolchains in use accept it.
__host__ __device__ std::uint32_t as_u32(float x) noexcept {
    union Converter {
        float f;
        std::uint32_t i;
    };

    return Converter{x}.i;
}

// Integer division rounding toward +infinity (ceil-div); used below to size
// the grid so every input point is covered by a thread.
constexpr std::size_t div_to_inf(std::size_t x, std::size_t y) noexcept {
    const std::size_t res = x / y;

    if (x % y != 0) {
        return res + 1;
    }

    return res;
}

/**
 * Projects one transformed pointcloud element into the bird's-eye output
 * image. One thread per input point; each thread writes at most one pixel.
 *
 * @param input Points to an array of length n. Each element must be
 *              a 4-tuple (X, Y, Z, RGBA) corresponding to a depth map.
 *              The RGBA component is packed into a 32-bit float, with
 *              each element being an 8-bit unsigned integer.
 * @param output Points to a matrix with m rows and p columns. Each
 *               element must be a 4-tuple (B, G, R, A). The elements
 *               should be stored contiguously in row-major order.
 * @param resolution The resolution (in meters) of each pixel in output,
 *                   such that each pixel represents a
 *                   (resolution x resolution) square.
 * @param x_offset The offset (in meters) between the center of the
 *                 matrix and its leftmost edge, such that the pixel at
 *                 (0, 0) is located in free space at (-x_offset, -y_offset).
 * @param y_offset The offset (in meters) between the center of the
 *                 matrix and its topmost edge, such that the pixel at
 *                 (0, 0) is located in free space at (-x_offset, -y_offset).
 */
__global__ void transform(
    simt_tf::Transform tf,
    const sl::float4 *input,
    std::uint32_t n,
    std::uint32_t *output,
    std::uint32_t output_rows,
    std::uint32_t output_cols,
    std::uint32_t output_stride,
    float resolution,
    float x_offset,
    float y_offset
) {
    const std::uint32_t pixel_idx = (blockIdx.x * blockDim.x) + threadIdx.x;

    // tail guard: the grid is ceil-divided, so the last block may overhang n
    if (pixel_idx >= n) {
        return;
    }

    const sl::float4 elem = input[pixel_idx];

    // drop points whose spatial components are NaN (invalid measurements)
    if (isnan(elem)) {
        return;
    }

    const simt_tf::Vector transformed = tf({elem[0], elem[1], elem[2]});

    // meters -> pixel coordinates, shifting the origin to the image corner
    const float pixel_x = (transformed.x() + x_offset) / resolution;
    const float pixel_y = (transformed.y() + y_offset) / resolution;

    if (pixel_x < 0 || pixel_y < 0) {
        return;
    }

    const auto col = static_cast<std::uint32_t>(pixel_x);
    const auto row = static_cast<std::uint32_t>(pixel_y);

    if (col >= output_cols || row >= output_rows) {
        return;
    }

    // NOTE(review): threads mapping to the same pixel race here (plain store,
    // last writer wins) — presumably acceptable for a visualization output.
    const std::uint32_t output_idx = row * output_stride + col;
    output[output_idx] = as_u32(elem[3]);
}

} // namespace

namespace simt_tf {

/**
 * Fetches the left-side pointcloud from a ZED camera, then performs a
 * GPU-accelerated transform (translate then rotate) on each point and
 * projects it into a bird's eye view.
 *
 * @param tf Must be a valid coordinate transform, meaning that the
 *           determinant of its basis rotation matrix must be 1.
 * @param camera Must be opened, have the latest data grabbed, and have
 *               support for depth images and pointclouds. It is assumed
 *               that x is forward, y is left, and z is up, in accordance
 *               with ROS REP 103.
 * @param pointcloud The pointcloud to place the pointcloud retrieved
 *                   from the ZED.
 * @param output The matrix to place the bird's eye transformed image in.
 * @param resolution The resolution, in whatever unit the ZED is
 *                   configured to output in, of each cell of the output
 *                   matrix. In other words, each cell represents a
 *                   (resolution x resolution) square in free space.
 *
 * @throws std::system_error if any ZED SDK or CUDA function call fails.
 */
void pointcloud_birdseye(
    const Transform &tf,
    sl::Camera &camera,
    sl::Mat &pointcloud,
    cv::cuda::GpuMat &output,
    float resolution
) {
    assert(camera.isOpened());

    // bind the camera's CUDA context to this thread before touching its memory
    // NOTE(review): return code is ignored here — consider checking it.
    hipCtxSetCurrent(camera.getCUDAContext());

    const auto output_numel = static_cast<std::uint32_t>(output.size().area());

    constexpr std::uint32_t BLOCKSIZE = 256;

    // clear the previous frame's pixels (byte-wise zero fill)
    // NOTE(review): return code is ignored here — consider checking it.
    hipMemset(output.ptr<std::uint32_t>(), 0, output_numel * sizeof(std::uint32_t));

    const std::error_code pc_ret = camera.retrieveMeasure(pointcloud, sl::MEASURE_XYZBGRA, sl::MEM_GPU);

    if (pc_ret) {
        throw std::system_error(pc_ret);
    }

    const auto output_cols = static_cast<std::uint32_t>(output.size().width);
    const auto output_rows = static_cast<std::uint32_t>(output.size().height);
    // GpuMat::step is in bytes; convert to a stride in elements for the kernel
    const auto output_stride = static_cast<std::uint32_t>(output.step / output.elemSize());

    // physical extent covered by the output image
    const float x_range = static_cast<float>(output_cols) * resolution;
    const float y_range = static_cast<float>(output_rows) * resolution;

    const auto numel = static_cast<std::uint32_t>(pointcloud.getResolution().area());
    const std::uint32_t num_blocks = div_to_inf(numel, BLOCKSIZE);

    // half-range offsets center the image on the camera origin; launch is
    // asynchronous — no synchronization or launch-error check follows here.
    hipLaunchKernelGGL(( transform), dim3(num_blocks), dim3(BLOCKSIZE), 0, 0, 
        tf,
        pointcloud.getPtr<sl::float4>(sl::MEM_GPU),
        numel,
        output.ptr<std::uint32_t>(),
        output_rows,
        output_cols,
        output_stride,
        resolution,
        x_range / 2,
        y_range / 2
    );
}

} // namespace simt_tf
21737a51bb586924fbd34ad976d16f90f50fe72c.cu
// Copyright (c) 2019 Gregory Meyer
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use, copy,
// modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice (including
// the next paragraph) shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.

#include <simt_tf/simt_tf.h>

#include "err.cuh"

#include <cassert>
#include <cmath>
#include <atomic>
#include <mutex>

namespace {

// True iff any spatial component (x, y, z) of the point is NaN.
// The packed color stored in v[3] is deliberately not inspected.
__host__ __device__ bool isnan(const sl::float4 &v) noexcept {
    return std::isnan(v[0]) || std::isnan(v[1]) || std::isnan(v[2]);
}

// Bit-reinterprets a float as a 32-bit unsigned integer; used to recover the
// color that the ZED SDK packs into the fourth float lane of each point.
// NOTE(review): relies on union type punning — formally UB in ISO C++;
// confirm the toolchains in use accept it.
__host__ __device__ std::uint32_t as_u32(float x) noexcept {
    union Converter {
        float f;
        std::uint32_t i;
    };

    return Converter{x}.i;
}

// Integer division rounding toward +infinity (ceil-div); used below to size
// the grid so every input point is covered by a thread.
constexpr std::size_t div_to_inf(std::size_t x, std::size_t y) noexcept {
    const std::size_t res = x / y;

    if (x % y != 0) {
        return res + 1;
    }

    return res;
}

/**
 * Projects one transformed pointcloud element into the bird's-eye output
 * image. One thread per input point; each thread writes at most one pixel.
 *
 * @param input Points to an array of length n. Each element must be
 *              a 4-tuple (X, Y, Z, RGBA) corresponding to a depth map.
 *              The RGBA component is packed into a 32-bit float, with
 *              each element being an 8-bit unsigned integer.
 * @param output Points to a matrix with m rows and p columns. Each
 *               element must be a 4-tuple (B, G, R, A). The elements
 *               should be stored contiguously in row-major order.
 * @param resolution The resolution (in meters) of each pixel in output,
 *                   such that each pixel represents a
 *                   (resolution x resolution) square.
 * @param x_offset The offset (in meters) between the center of the
 *                 matrix and its leftmost edge, such that the pixel at
 *                 (0, 0) is located in free space at (-x_offset, -y_offset).
 * @param y_offset The offset (in meters) between the center of the
 *                 matrix and its topmost edge, such that the pixel at
 *                 (0, 0) is located in free space at (-x_offset, -y_offset).
 */
__global__ void transform(
    simt_tf::Transform tf,
    const sl::float4 *input,
    std::uint32_t n,
    std::uint32_t *output,
    std::uint32_t output_rows,
    std::uint32_t output_cols,
    std::uint32_t output_stride,
    float resolution,
    float x_offset,
    float y_offset
) {
    const std::uint32_t pixel_idx = (blockIdx.x * blockDim.x) + threadIdx.x;

    // tail guard: the grid is ceil-divided, so the last block may overhang n
    if (pixel_idx >= n) {
        return;
    }

    const sl::float4 elem = input[pixel_idx];

    // drop points whose spatial components are NaN (invalid measurements)
    if (isnan(elem)) {
        return;
    }

    const simt_tf::Vector transformed = tf({elem[0], elem[1], elem[2]});

    // meters -> pixel coordinates, shifting the origin to the image corner
    const float pixel_x = (transformed.x() + x_offset) / resolution;
    const float pixel_y = (transformed.y() + y_offset) / resolution;

    if (pixel_x < 0 || pixel_y < 0) {
        return;
    }

    const auto col = static_cast<std::uint32_t>(pixel_x);
    const auto row = static_cast<std::uint32_t>(pixel_y);

    if (col >= output_cols || row >= output_rows) {
        return;
    }

    // NOTE(review): threads mapping to the same pixel race here (plain store,
    // last writer wins) — presumably acceptable for a visualization output.
    const std::uint32_t output_idx = row * output_stride + col;
    output[output_idx] = as_u32(elem[3]);
}

} // namespace

namespace simt_tf {

/**
 * Fetches the left-side pointcloud from a ZED camera, then performs a
 * GPU-accelerated transform (translate then rotate) on each point and
 * projects it into a bird's eye view.
 *
 * @param tf Must be a valid coordinate transform, meaning that the
 *           determinant of its basis rotation matrix must be 1.
 * @param camera Must be opened, have the latest data grabbed, and have
 *               support for depth images and pointclouds. It is assumed
 *               that x is forward, y is left, and z is up, in accordance
 *               with ROS REP 103.
 * @param pointcloud The pointcloud to place the pointcloud retrieved
 *                   from the ZED.
 * @param output The matrix to place the bird's eye transformed image in.
 * @param resolution The resolution, in whatever unit the ZED is
 *                   configured to output in, of each cell of the output
 *                   matrix. In other words, each cell represents a
 *                   (resolution x resolution) square in free space.
 *
 * @throws std::system_error if any ZED SDK or CUDA function call fails.
 */
void pointcloud_birdseye(
    const Transform &tf,
    sl::Camera &camera,
    sl::Mat &pointcloud,
    cv::cuda::GpuMat &output,
    float resolution
) {
    assert(camera.isOpened());

    // bind the camera's CUDA context to this thread before touching its memory
    // NOTE(review): return code is ignored here — consider checking it.
    cuCtxSetCurrent(camera.getCUDAContext());

    const auto output_numel = static_cast<std::uint32_t>(output.size().area());

    constexpr std::uint32_t BLOCKSIZE = 256;

    // clear the previous frame's pixels (byte-wise zero fill)
    // NOTE(review): return code is ignored here — consider checking it.
    cudaMemset(output.ptr<std::uint32_t>(), 0, output_numel * sizeof(std::uint32_t));

    const std::error_code pc_ret = camera.retrieveMeasure(pointcloud, sl::MEASURE_XYZBGRA, sl::MEM_GPU);

    if (pc_ret) {
        throw std::system_error(pc_ret);
    }

    const auto output_cols = static_cast<std::uint32_t>(output.size().width);
    const auto output_rows = static_cast<std::uint32_t>(output.size().height);
    // GpuMat::step is in bytes; convert to a stride in elements for the kernel
    const auto output_stride = static_cast<std::uint32_t>(output.step / output.elemSize());

    // physical extent covered by the output image
    const float x_range = static_cast<float>(output_cols) * resolution;
    const float y_range = static_cast<float>(output_rows) * resolution;

    const auto numel = static_cast<std::uint32_t>(pointcloud.getResolution().area());
    const std::uint32_t num_blocks = div_to_inf(numel, BLOCKSIZE);

    // half-range offsets center the image on the camera origin; launch is
    // asynchronous — no synchronization or launch-error check follows here.
    transform<<<num_blocks, BLOCKSIZE>>>(
        tf,
        pointcloud.getPtr<sl::float4>(sl::MEM_GPU),
        numel,
        output.ptr<std::uint32_t>(),
        output_rows,
        output_cols,
        output_stride,
        resolution,
        x_range / 2,
        y_range / 2
    );
}

} // namespace simt_tf
a6709a046390039a07f02e9200405f01d965af52.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/cross_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/reduce_function.h"

namespace phi {

// Computes out = x cross y for N independent 3-vectors.
// `i` indexes a vector; index_calculator maps it to the flat offset of that
// vector's first component, and `stride` is the distance (in elements)
// between consecutive components of the same vector.
template <typename T>
__global__ void Cross(const T* x,
                      const T* y,
                      T* out,
                      const int stride,
                      const int N,
                      phi::funcs::IndexCalculator index_calculator) {
  CUDA_KERNEL_LOOP(i, N) {
    int offset = index_calculator(i);

    auto pos0 = offset + 0 * stride;
    auto pos1 = offset + 1 * stride;
    auto pos2 = offset + 2 * stride;

    // standard 3-D cross product, one component per statement
    out[pos0] = x[pos1] * y[pos2] - x[pos2] * y[pos1];
    out[pos1] = x[pos2] * y[pos0] - x[pos0] * y[pos2];
    out[pos2] = x[pos0] * y[pos1] - x[pos1] * y[pos0];
  }
}

// Cross product of x and y along `axis` (which must have extent 3).
// When axis == DDim::kMaxRank, the first dimension of extent 3 is used.
template <typename T, typename Context>
void CrossKernel(const Context& dev_ctx,
                 const DenseTensor& x,
                 const DenseTensor& y,
                 int axis,
                 DenseTensor* out) {
  auto& input_x = x;
  auto& input_y = y;
  auto* output = out;  // NOTE(review): this alias is never used below
  int dim = axis;

  auto input_x_dims = input_x.dims();

  if (dim != DDim::kMaxRank) {
    // explicit axis: validate range, normalize negatives, require extent 3
    PADDLE_ENFORCE_EQ(
        dim < input_x_dims.size() && dim >= (0 - input_x_dims.size()),
        true,
        phi::errors::OutOfRange(
            "Attr(dim) is out of range, It's expected "
            "to be in range of [-%d, %d]. But received Attr(dim) = %d.",
            input_x_dims.size(),
            input_x_dims.size() - 1,
            dim));
    if (dim < 0) {
      dim += input_x_dims.size();
    }

    PADDLE_ENFORCE_EQ(
        input_x_dims[dim] == 3,
        true,
        phi::errors::InvalidArgument(
            "Input(X/Y).dims[dim] must be equal to 3. But received: "
            "Input(X/Y).dims[dim] = [%d].",
            input_x_dims[dim]));
  } else {
    // implicit axis: pick the first dimension whose extent is 3
    for (auto i = 0; i < input_x_dims.size(); i++) {
      if (input_x_dims[i] == 3) {
        dim = i;
        break;
      }
    }
    PADDLE_ENFORCE_EQ(dim == DDim::kMaxRank,
                      false,
                      phi::errors::InvalidArgument(
                          "There must be at least one dimension 'd' so that "
                          "Input(X/Y).dims()[d] is equal to 3. "
                          "But received: Input(X/Y).dims() == [%s].",
                          input_x_dims));
  }

  std::vector<int> cal_dims;
  std::vector<int> left_strides;
  std::vector<int> full_strides;
  std::vector<int> merged_dims;

  // Collapse the shape to at most [pre, 3, post]: every dimension before
  // `dim` merges into one leading axis, every dimension after into one
  // trailing axis.
  for (int i = 0; i < dim; i++) {
    if (i == 0) {
      merged_dims.push_back(input_x_dims[i]);
    } else {
      merged_dims[0] *= input_x_dims[i];
    }
  }
  int merge_axis = merged_dims.size();  // position of the length-3 axis
  merged_dims.push_back(input_x_dims[dim]);
  for (int i = dim + 1; i < input_x_dims.size(); i++) {
    if (i == dim + 1) {
      merged_dims.push_back(input_x_dims[i]);
    } else {
      merged_dims[merge_axis + 1] *= input_x_dims[i];
    }
  }

  // full_strides: row-major strides of the merged shape;
  // cal_dims collects every merged axis except the cross axis.
  int full_dim = 1;
  for (int i = 0; i < merged_dims.size(); i++) {
    full_strides.insert(full_strides.begin(), full_dim);
    full_dim *= merged_dims[merged_dims.size() - i - 1];
    if (i == merge_axis) {
      continue;
    }
    cal_dims.push_back(i);
  }
  // left_strides: strides of the merged shape with the cross axis removed
  int left_dim = 1;
  for (int i = merged_dims.size() - 1; i >= 0; i--) {
    if (i == merge_axis) {
      continue;
    }
    left_strides.insert(left_strides.begin(), left_dim);
    left_dim *= merged_dims[i];
  }

  const auto* input_x_data = input_x.data<T>();
  const auto* input_y_data = input_y.data<T>();
  auto* out_data = dev_ctx.template Alloc<T>(out);
  // maps a vector index (over the non-cross axes) to its flat tensor offset
  auto index_calculator = phi::funcs::IndexCalculator(
      merged_dims.size() - 1, cal_dims, left_strides, full_strides);

  int64_t numel = x.numel();
  // size the 1-D launch over numel / 3 three-component vectors
  backends::gpu::GpuLaunchConfig config =
      backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel / 3);

  hipLaunchKernelGGL(( Cross), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), 
      input_x_data,
      input_y_data,
      out_data,
      full_strides[merge_axis],
      numel / 3,
      index_calculator);
}

}  // namespace phi

PD_REGISTER_KERNEL(
    cross, GPU, ALL_LAYOUT, phi::CrossKernel, float, double, int, int64_t) {}
a6709a046390039a07f02e9200405f01d965af52.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/cross_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/reduce_function.h"

namespace phi {

// Computes out = x cross y for N independent 3-vectors.
// `i` indexes a vector; index_calculator maps it to the flat offset of that
// vector's first component, and `stride` is the distance (in elements)
// between consecutive components of the same vector.
template <typename T>
__global__ void Cross(const T* x,
                      const T* y,
                      T* out,
                      const int stride,
                      const int N,
                      phi::funcs::IndexCalculator index_calculator) {
  CUDA_KERNEL_LOOP(i, N) {
    int offset = index_calculator(i);

    auto pos0 = offset + 0 * stride;
    auto pos1 = offset + 1 * stride;
    auto pos2 = offset + 2 * stride;

    // standard 3-D cross product, one component per statement
    out[pos0] = x[pos1] * y[pos2] - x[pos2] * y[pos1];
    out[pos1] = x[pos2] * y[pos0] - x[pos0] * y[pos2];
    out[pos2] = x[pos0] * y[pos1] - x[pos1] * y[pos0];
  }
}

// Cross product of x and y along `axis` (which must have extent 3).
// When axis == DDim::kMaxRank, the first dimension of extent 3 is used.
template <typename T, typename Context>
void CrossKernel(const Context& dev_ctx,
                 const DenseTensor& x,
                 const DenseTensor& y,
                 int axis,
                 DenseTensor* out) {
  auto& input_x = x;
  auto& input_y = y;
  auto* output = out;  // NOTE(review): this alias is never used below
  int dim = axis;

  auto input_x_dims = input_x.dims();

  if (dim != DDim::kMaxRank) {
    // explicit axis: validate range, normalize negatives, require extent 3
    PADDLE_ENFORCE_EQ(
        dim < input_x_dims.size() && dim >= (0 - input_x_dims.size()),
        true,
        phi::errors::OutOfRange(
            "Attr(dim) is out of range, It's expected "
            "to be in range of [-%d, %d]. But received Attr(dim) = %d.",
            input_x_dims.size(),
            input_x_dims.size() - 1,
            dim));
    if (dim < 0) {
      dim += input_x_dims.size();
    }

    PADDLE_ENFORCE_EQ(
        input_x_dims[dim] == 3,
        true,
        phi::errors::InvalidArgument(
            "Input(X/Y).dims[dim] must be equal to 3. But received: "
            "Input(X/Y).dims[dim] = [%d].",
            input_x_dims[dim]));
  } else {
    // implicit axis: pick the first dimension whose extent is 3
    for (auto i = 0; i < input_x_dims.size(); i++) {
      if (input_x_dims[i] == 3) {
        dim = i;
        break;
      }
    }
    PADDLE_ENFORCE_EQ(dim == DDim::kMaxRank,
                      false,
                      phi::errors::InvalidArgument(
                          "There must be at least one dimension 'd' so that "
                          "Input(X/Y).dims()[d] is equal to 3. "
                          "But received: Input(X/Y).dims() == [%s].",
                          input_x_dims));
  }

  std::vector<int> cal_dims;
  std::vector<int> left_strides;
  std::vector<int> full_strides;
  std::vector<int> merged_dims;

  // Collapse the shape to at most [pre, 3, post]: every dimension before
  // `dim` merges into one leading axis, every dimension after into one
  // trailing axis.
  for (int i = 0; i < dim; i++) {
    if (i == 0) {
      merged_dims.push_back(input_x_dims[i]);
    } else {
      merged_dims[0] *= input_x_dims[i];
    }
  }
  int merge_axis = merged_dims.size();  // position of the length-3 axis
  merged_dims.push_back(input_x_dims[dim]);
  for (int i = dim + 1; i < input_x_dims.size(); i++) {
    if (i == dim + 1) {
      merged_dims.push_back(input_x_dims[i]);
    } else {
      merged_dims[merge_axis + 1] *= input_x_dims[i];
    }
  }

  // full_strides: row-major strides of the merged shape;
  // cal_dims collects every merged axis except the cross axis.
  int full_dim = 1;
  for (int i = 0; i < merged_dims.size(); i++) {
    full_strides.insert(full_strides.begin(), full_dim);
    full_dim *= merged_dims[merged_dims.size() - i - 1];
    if (i == merge_axis) {
      continue;
    }
    cal_dims.push_back(i);
  }
  // left_strides: strides of the merged shape with the cross axis removed
  int left_dim = 1;
  for (int i = merged_dims.size() - 1; i >= 0; i--) {
    if (i == merge_axis) {
      continue;
    }
    left_strides.insert(left_strides.begin(), left_dim);
    left_dim *= merged_dims[i];
  }

  const auto* input_x_data = input_x.data<T>();
  const auto* input_y_data = input_y.data<T>();
  auto* out_data = dev_ctx.template Alloc<T>(out);
  // maps a vector index (over the non-cross axes) to its flat tensor offset
  auto index_calculator = phi::funcs::IndexCalculator(
      merged_dims.size() - 1, cal_dims, left_strides, full_strides);

  int64_t numel = x.numel();
  // size the 1-D launch over numel / 3 three-component vectors
  backends::gpu::GpuLaunchConfig config =
      backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel / 3);

  Cross<<<config.block_per_grid,
          config.thread_per_block,
          0,
          dev_ctx.stream()>>>(input_x_data,
                              input_y_data,
                              out_data,
                              full_strides[merge_axis],
                              numel / 3,
                              index_calculator);
}

}  // namespace phi

PD_REGISTER_KERNEL(
    cross, GPU, ALL_LAYOUT, phi::CrossKernel, float, double, int, int64_t) {}
bf9516f78e0d394382fa79388560a18402fae998.hip
// !!! This is a file automatically generated by hipify!!! #include "hip_runtime.h" #define LIMIT -999 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "needle.h" #include <hip/hip_runtime.h> #include <sys/time.h> // includes, kernels #include "needle_kernel_hip.cu" #ifdef PROFILING #include "RDTimer.h" SimplePerfSerializer* serializeTime; RDTimerCPU* rdtimercpu; #endif //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int blosum62[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, 
-2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { printf("WG size of kernel = %d \n", BLOCK_SIZE); #ifdef PROFILING float overall_cpu_t; RDTimerCPU* rdtimerOverallCpu = new RDTimerCPU(); rdtimerOverallCpu->Reset("Overall CPU Time"); serializeTime = new SimplePerfSerializer( argv[0] ); rdtimercpu = new RDTimerCPU(); rdtimerOverallCpu->Start(); #endif runTest( argc, argv); #ifdef PROFILING overall_cpu_t = rdtimerOverallCpu->Stop(); serializeTime->Serialize(rdtimerOverallCpu); //printf("Overall CPU time = %fs\n", overall_cpu_t); delete rdtimercpu; delete rdtimerOverallCpu; delete serializeTime; #endif return EXIT_SUCCESS; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]); fprintf(stderr, "\t<dimension> - x and y dimensions\n"); fprintf(stderr, "\t<penalty> - penalty(positive integer)\n"); exit(1); } void runTest( int argc, char** argv) { int max_rows, max_cols, penalty; int 
*input_itemsets, *output_itemsets, *referrence; int *matrix_cuda, *referrence_cuda; int size; // the lengths of the two sequences should be able to divided by 16. // And at current stage max_rows needs to equal max_cols if (argc == 3) { max_rows = atoi(argv[1]); max_cols = atoi(argv[1]); penalty = atoi(argv[2]); } else{ usage(argc, argv); } if(atoi(argv[1])%16!=0){ fprintf(stderr,"The dimension values must be a multiple of 16\n"); exit(1); } max_rows = max_rows + 1; max_cols = max_cols + 1; referrence = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); if (!input_itemsets) fprintf(stderr, "error: can not allocate memory"); srand ( 7 ); for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } printf("Start Needleman-Wunsch\n"); for( int i=1; i< max_rows ; i++){ //please define your own sequence. input_itemsets[i*max_cols] = rand() % 10 + 1; } for( int j=1; j< max_cols ; j++){ //please define your own sequence. 
input_itemsets[j] = rand() % 10 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } for( int i = 1; i< max_rows ; i++) { input_itemsets[i*max_cols] = -i * penalty; } for( int j = 1; j< max_cols ; j++) { input_itemsets[j] = -j * penalty; } size = max_cols * max_rows; #ifdef PROFILING rdtimercpu->Reset("Malloc Time"); rdtimercpu->Start(); #endif hipMalloc((void**)& referrence_cuda, sizeof(int)*size); hipMalloc((void**)& matrix_cuda, sizeof(int)*size); #ifdef PROFILING rdtimercpu->Stop(); serializeTime->Serialize(rdtimercpu); rdtimercpu->Reset("CPU to GPU Transfer Time"); rdtimercpu->Start(); #endif hipMemcpy(referrence_cuda, referrence, sizeof(int) * size, hipMemcpyHostToDevice); hipMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, hipMemcpyHostToDevice); #ifdef PROFILING rdtimercpu->Stop(); serializeTime->Serialize(rdtimercpu); rdtimercpu->Reset("COMPUTE:Compute Time"); rdtimercpu->Start(); #endif dim3 dimGrid; dim3 dimBlock(BLOCK_SIZE, 1); int block_width = ( max_cols - 1 )/BLOCK_SIZE; //printf("Processing top-left matrix\n"); //process top-left matrix for( int i = 1 ; i <= block_width ; i++){ dimGrid.x = i; dimGrid.y = 1; hipLaunchKernel(needle_cuda_shared_1, dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda ,max_cols, penalty, i, block_width); } //printf("Processing bottom-right matrix\n"); //process bottom-right matrix for( int i = block_width - 1 ; i >= 1 ; i--){ dimGrid.x = i; dimGrid.y = 1; hipLaunchKernel(needle_cuda_shared_2, dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda ,max_cols, penalty, i, block_width); } #ifdef PROFILING rdtimercpu->Stop(); serializeTime->Serialize(rdtimercpu); rdtimercpu->Reset("GPU to CPU Transfer Time"); rdtimercpu->Start(); #endif hipMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, hipMemcpyDeviceToHost); #ifdef PROFILING rdtimercpu->Stop(); 
serializeTime->Serialize(rdtimercpu); #endif #define TRACEBACK #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); fprintf(fpo, "print traceback value GPU:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } //traceback = maximum(nw, w, n); int new_nw, new_w, new_n; new_nw = nw + referrence[i * max_cols + j]; new_w = w - penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == new_w) traceback = w; if(traceback == new_n) traceback = n; fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } fclose(fpo); // CPU Trackback #endif hipFree(referrence_cuda); hipFree(matrix_cuda); free(referrence); free(input_itemsets); free(output_itemsets); }
bf9516f78e0d394382fa79388560a18402fae998.cu
#include "hip_runtime.h" #define LIMIT -999 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "needle.h" #include <cuda.h> #include <sys/time.h> // includes, kernels #include "needle_kernel_hip.cu" #ifdef PROFILING #include "RDTimer.h" SimplePerfSerializer* serializeTime; RDTimerCPU* rdtimercpu; #endif //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int blosum62[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, 
-2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { printf("WG size of kernel = %d \n", BLOCK_SIZE); #ifdef PROFILING float overall_cpu_t; RDTimerCPU* rdtimerOverallCpu = new RDTimerCPU(); rdtimerOverallCpu->Reset("Overall CPU Time"); serializeTime = new SimplePerfSerializer( argv[0] ); rdtimercpu = new RDTimerCPU(); rdtimerOverallCpu->Start(); #endif runTest( argc, argv); #ifdef PROFILING overall_cpu_t = rdtimerOverallCpu->Stop(); serializeTime->Serialize(rdtimerOverallCpu); //printf("Overall CPU time = %fs\n", overall_cpu_t); delete rdtimercpu; delete rdtimerOverallCpu; delete serializeTime; #endif return EXIT_SUCCESS; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]); fprintf(stderr, "\t<dimension> - x and y dimensions\n"); fprintf(stderr, "\t<penalty> - penalty(positive integer)\n"); exit(1); } void runTest( int argc, char** argv) { int max_rows, max_cols, penalty; int *input_itemsets, *output_itemsets, *referrence; int *matrix_cuda, 
*referrence_cuda; int size; // the lengths of the two sequences should be able to divided by 16. // And at current stage max_rows needs to equal max_cols if (argc == 3) { max_rows = atoi(argv[1]); max_cols = atoi(argv[1]); penalty = atoi(argv[2]); } else{ usage(argc, argv); } if(atoi(argv[1])%16!=0){ fprintf(stderr,"The dimension values must be a multiple of 16\n"); exit(1); } max_rows = max_rows + 1; max_cols = max_cols + 1; referrence = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); if (!input_itemsets) fprintf(stderr, "error: can not allocate memory"); srand ( 7 ); for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } printf("Start Needleman-Wunsch\n"); for( int i=1; i< max_rows ; i++){ //please define your own sequence. input_itemsets[i*max_cols] = rand() % 10 + 1; } for( int j=1; j< max_cols ; j++){ //please define your own sequence. 
input_itemsets[j] = rand() % 10 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } for( int i = 1; i< max_rows ; i++) { input_itemsets[i*max_cols] = -i * penalty; } for( int j = 1; j< max_cols ; j++) { input_itemsets[j] = -j * penalty; } size = max_cols * max_rows; #ifdef PROFILING rdtimercpu->Reset("Malloc Time"); rdtimercpu->Start(); #endif hipMalloc((void**)& referrence_cuda, sizeof(int)*size); hipMalloc((void**)& matrix_cuda, sizeof(int)*size); #ifdef PROFILING rdtimercpu->Stop(); serializeTime->Serialize(rdtimercpu); rdtimercpu->Reset("CPU to GPU Transfer Time"); rdtimercpu->Start(); #endif hipMemcpy(referrence_cuda, referrence, sizeof(int) * size, hipMemcpyHostToDevice); hipMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, hipMemcpyHostToDevice); #ifdef PROFILING rdtimercpu->Stop(); serializeTime->Serialize(rdtimercpu); rdtimercpu->Reset("COMPUTE:Compute Time"); rdtimercpu->Start(); #endif dim3 dimGrid; dim3 dimBlock(BLOCK_SIZE, 1); int block_width = ( max_cols - 1 )/BLOCK_SIZE; //printf("Processing top-left matrix\n"); //process top-left matrix for( int i = 1 ; i <= block_width ; i++){ dimGrid.x = i; dimGrid.y = 1; hipLaunchKernel(needle_cuda_shared_1, dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda ,max_cols, penalty, i, block_width); } //printf("Processing bottom-right matrix\n"); //process bottom-right matrix for( int i = block_width - 1 ; i >= 1 ; i--){ dimGrid.x = i; dimGrid.y = 1; hipLaunchKernel(needle_cuda_shared_2, dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda ,max_cols, penalty, i, block_width); } #ifdef PROFILING rdtimercpu->Stop(); serializeTime->Serialize(rdtimercpu); rdtimercpu->Reset("GPU to CPU Transfer Time"); rdtimercpu->Start(); #endif hipMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, hipMemcpyDeviceToHost); #ifdef PROFILING rdtimercpu->Stop(); 
serializeTime->Serialize(rdtimercpu); #endif #define TRACEBACK #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); fprintf(fpo, "print traceback value GPU:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } //traceback = maximum(nw, w, n); int new_nw, new_w, new_n; new_nw = nw + referrence[i * max_cols + j]; new_w = w - penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == new_w) traceback = w; if(traceback == new_n) traceback = n; fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } fclose(fpo); // CPU Trackback #endif hipFree(referrence_cuda); hipFree(matrix_cuda); free(referrence); free(input_itemsets); free(output_itemsets); }
0bd8fee4d40d15bc6e2903122389cca4ba7463b2.hip
// !!! This is a file automatically generated by hipify!!! #include <gputk.h> #define NUM_BINS 128 #define CUDA_CHECK(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } int main(int argc, char *argv[]) { gpuTKArg_t args; int inputLength; char *hostInput; unsigned int *hostBins; char *deviceInput; unsigned int *deviceBins; args = gpuTKArg_read(argc, argv); gpuTKTime_start(Generic, "Importing data and creating memory on host"); hostInput = (char *)gpuTKImport(gpuTKArg_getInputFile(args, 0), &inputLength, "Text"); hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int)); gpuTKTime_stop(Generic, "Importing data and creating memory on host"); gpuTKLog(TRACE, "The input length is ", inputLength); gpuTKLog(TRACE, "The number of bins is ", NUM_BINS); gpuTKTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here CUDA_CHECK(hipDeviceSynchronize()); gpuTKTime_stop(GPU, "Allocating GPU memory."); gpuTKTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here CUDA_CHECK(hipDeviceSynchronize()); gpuTKTime_stop(GPU, "Copying input memory to the GPU."); // Launch kernel // ---------------------------------------------------------- gpuTKLog(TRACE, "Launching kernel"); gpuTKTime_start(Compute, "Performing CUDA computation"); //@@ Perform kernel computation here gpuTKTime_stop(Compute, "Performing CUDA computation"); gpuTKTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here CUDA_CHECK(hipDeviceSynchronize()); gpuTKTime_stop(Copy, "Copying output memory to the CPU"); gpuTKTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here gpuTKTime_stop(GPU, "Freeing GPU Memory"); // Verify correctness // ----------------------------------------------------- 
gpuTKSolution(args, hostBins, NUM_BINS); free(hostBins); free(hostInput); return 0; }
0bd8fee4d40d15bc6e2903122389cca4ba7463b2.cu
#include <gputk.h> #define NUM_BINS 128 #define CUDA_CHECK(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } int main(int argc, char *argv[]) { gpuTKArg_t args; int inputLength; char *hostInput; unsigned int *hostBins; char *deviceInput; unsigned int *deviceBins; args = gpuTKArg_read(argc, argv); gpuTKTime_start(Generic, "Importing data and creating memory on host"); hostInput = (char *)gpuTKImport(gpuTKArg_getInputFile(args, 0), &inputLength, "Text"); hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int)); gpuTKTime_stop(Generic, "Importing data and creating memory on host"); gpuTKLog(TRACE, "The input length is ", inputLength); gpuTKLog(TRACE, "The number of bins is ", NUM_BINS); gpuTKTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here CUDA_CHECK(cudaDeviceSynchronize()); gpuTKTime_stop(GPU, "Allocating GPU memory."); gpuTKTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here CUDA_CHECK(cudaDeviceSynchronize()); gpuTKTime_stop(GPU, "Copying input memory to the GPU."); // Launch kernel // ---------------------------------------------------------- gpuTKLog(TRACE, "Launching kernel"); gpuTKTime_start(Compute, "Performing CUDA computation"); //@@ Perform kernel computation here gpuTKTime_stop(Compute, "Performing CUDA computation"); gpuTKTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here CUDA_CHECK(cudaDeviceSynchronize()); gpuTKTime_stop(Copy, "Copying output memory to the CPU"); gpuTKTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here gpuTKTime_stop(GPU, "Freeing GPU Memory"); // Verify correctness // ----------------------------------------------------- gpuTKSolution(args, hostBins, NUM_BINS); free(hostBins); 
free(hostInput); return 0; }
3b1de2d27b1b6681785c38a5e7881050b6bc16d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "VoxelGrid.h" #include "debug.h" #include "common.h" #include <math.h> #include <limits> #include <eigen3/Eigen/Eigenvalues> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/scan.h> #include <thrust/fill.h> #include <inttypes.h> namespace gpu { //Copied from cuda c programming guide #if __CUDA_ARCH__ < 600 extern "C" __device__ double atomicAddD(double *address, double val) { unsigned long long int *address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif GVoxelGrid::~GVoxelGrid() { if (global_voxel_ != NULL) checkCudaErrors(hipFree(global_voxel_)); if (centroid_ != NULL) checkCudaErrors(hipFree(centroid_)); if (covariance_ != NULL) checkCudaErrors(hipFree(covariance_)); if (inverse_covariance_ != NULL) checkCudaErrors(hipFree(inverse_covariance_)); if (valid_points_ != NULL) checkCudaErrors(hipFree(valid_points_)); if (starting_voxel_id_ != NULL) checkCudaErrors(hipFree(starting_voxel_id_)); if (voxel_id_ != NULL) checkCudaErrors(hipFree(voxel_id_)); } extern "C" __global__ void initVoxelGrid(GVoxel *voxel_grid, int vgrid_x, int vgrid_y, int vgrid_z, float min_z, float min_x, float min_y, float voxel_x, float voxel_y, float voxel_z, double *centroid_buff, double *cov_buff, double *inverse_cov_buff) { int id_x = threadIdx.x + blockIdx.x * blockDim.x; int id_y = threadIdx.y + blockIdx.y * blockDim.y; int id_z = threadIdx.z + blockIdx.z * blockDim.z; if (id_x < vgrid_x && id_y < vgrid_y && id_z < vgrid_z) { int vgrid_id = id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y; GVoxel *voxel = voxel_grid + vgrid_id; voxel->minZ() = min_z + id_z * voxel_z; voxel->minX() = min_x + id_x * 
voxel_x; voxel->minY() = min_y + id_y * voxel_y; voxel->maxZ() = min_z + id_z * voxel_z + voxel_z; voxel->maxX() = min_x + id_x * voxel_x + voxel_x; voxel->maxY() = min_y + id_y * voxel_y + voxel_y; voxel->centroid() = MatrixDevice(1, 3, vgrid_x * vgrid_y * vgrid_z, centroid_buff); voxel->covariance() = MatrixDevice(3, 3, vgrid_x * vgrid_y * vgrid_z, cov_buff); voxel->inverseCovariance() = MatrixDevice(3, 3, vgrid_x * vgrid_y * vgrid_z, inverse_cov_buff); voxel->pointNum() = 0; } } void GVoxelGrid::initialize() { checkCudaErrors(hipMalloc(&centroid_, sizeof(double) * 3 * vgrid_x_ * vgrid_y_ * vgrid_z_)); checkCudaErrors(hipMalloc(&covariance_, sizeof(double) * 9 * vgrid_x_ * vgrid_y_ * vgrid_z_)); checkCudaErrors(hipMalloc(&inverse_covariance_, sizeof(double) * 9 * vgrid_x_ * vgrid_y_ * vgrid_z_)); int block_x = (vgrid_x_ > BLOCK_X) ? BLOCK_X : vgrid_x_; int block_y = (vgrid_y_ > BLOCK_Y) ? BLOCK_Y : vgrid_y_; int block_z = (vgrid_z_ > BLOCK_Z) ? BLOCK_Z : vgrid_z_; int grid_x = (vgrid_x_ - 1) / block_x + 1; int grid_y = (vgrid_y_ - 1) / block_y + 1; int grid_z = (vgrid_z_ - 1) / block_z + 1; dim3 block(block_x, block_y, block_z); dim3 grid(grid_x, grid_y, grid_z); hipLaunchKernelGGL(( initVoxelGrid), dim3(grid), dim3(block), 0, 0, global_voxel_, vgrid_x_, vgrid_y_, vgrid_z_, min_z_, min_x_, min_y_, voxel_x_, voxel_y_, voxel_z_, centroid_, covariance_, inverse_covariance_); checkCudaErrors(hipDeviceSynchronize()); } __device__ int voxelId(float x, float y, float z, float voxel_x, float voxel_y, float voxel_z, int min_b_x, int min_b_y, int min_b_z, int vgrid_x, int vgrid_y, int vgrid_z) { int id_x = static_cast<int>(floor(x / voxel_x) - static_cast<float>(min_b_x)); int id_y = static_cast<int>(floor(y / voxel_y) - static_cast<float>(min_b_y)); int id_z = static_cast<int>(floor(z / voxel_z) - static_cast<float>(min_b_z)); return (id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y); } /* First pass: insert points to voxels * Number of points, coordinate sum, and initial 
covariance * matrix in a voxel is calculated by atomicAdd. */ extern "C" __global__ void insertPointsToGrid(float *x, float *y, float *z, int points_num, GVoxel *voxel_grid, int vgrid_x, int vgrid_y, int vgrid_z, float voxel_x, float voxel_y, float voxel_z, int min_b_x, int min_b_y, int min_b_z) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < points_num; i += stride) { float t_x = x[i]; float t_y = y[i]; float t_z = z[i]; int voxel_id = voxelId(t_x, t_y, t_z, voxel_x, voxel_y, voxel_z, min_b_x, min_b_y, min_b_z, vgrid_x, vgrid_y, vgrid_z); GVoxel *voxel = voxel_grid + voxel_id; MatrixDevice centr = voxel->centroid(); MatrixDevice cov = voxel->covariance(); atomicAdd(voxel->pointNumAddress(), 1); atomicAddD(centr.cellAddr(0), t_x); atomicAddD(centr.cellAddr(1), t_y); atomicAddD(centr.cellAddr(2), t_z); atomicAddD(cov.cellAddr(0, 0), t_x * t_x); atomicAddD(cov.cellAddr(0, 1), t_x * t_y); atomicAddD(cov.cellAddr(0, 2), t_x * t_z); atomicAddD(cov.cellAddr(1, 0), t_y * t_x); atomicAddD(cov.cellAddr(1, 1), t_y * t_y); atomicAddD(cov.cellAddr(1, 2), t_y * t_z); atomicAddD(cov.cellAddr(2, 0), t_z * t_x); atomicAddD(cov.cellAddr(2, 1), t_z * t_y); atomicAddD(cov.cellAddr(2, 2), t_z * t_z); } } /* Second pass: update coordinate mean (centroid) and * covariance matrix of each cell */ extern "C" __global__ void updateVoxelCentroid(GVoxel *voxel_grid, int voxel_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int vid = index; vid < voxel_num; vid += stride) { GVoxel *node = voxel_grid + vid; int points_num = node->pointNum(); if (points_num == 0) return; MatrixDevice centr = node->centroid(); MatrixDevice cov = node->covariance(); MatrixDevice icov = node->inverseCovariance(); centr /= points_num; cov /= points_num; cov(0, 0) -= centr(0) * centr(0); cov(0, 1) -= centr(0) * centr(1); cov(0, 2) -= centr(0) * centr(2); cov(1, 0) = cov(0, 1); cov(1, 1) -= centr(1) * 
centr(1); cov(1, 2) -= centr(1) * centr(2); cov(2, 0) = cov(0, 2); cov(2, 1) = cov(1, 2); cov(2, 2) -= centr(2) * centr(2); Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eigensolver; Eigen::Matrix3d eigen_val; Eigen::Vector3d pt_sum; Eigen::Matrix3d cov_mat; Eigen::Matrix3d eigen_vectors; Eigen::Matrix3d cov_mat_inverse; cov_mat(0, 0) = cov(0, 0); cov_mat(0, 1) = cov(0, 1); cov_mat(0, 2) = cov(0, 2); cov_mat(1, 0) = cov(1, 0); cov_mat(1, 1) = cov(1, 1); cov_mat(1, 2) = cov(1, 2); cov_mat(2, 0) = cov(2, 0); cov_mat(2, 1) = cov(2, 1); cov_mat(2, 2) = cov(2, 2); eigensolver.compute(cov_mat); eigen_val = eigensolver.eigenvalues().asDiagonal(); eigen_vectors = eigensolver.eigenvectors(); if (eigen_val(0,0) < 0 || eigen_val(1, 1) < 0 || eigen_val(2, 2) <= 0) { node->pointNum() = -1; return; } float min_eigen_val = eigen_val(2, 2) / 100; if (eigen_val(0, 0) < min_eigen_val) { eigen_val(0, 0) = min_eigen_val; if (eigen_val(1, 1) < min_eigen_val) eigen_val(1, 1) = min_eigen_val; cov_mat = eigen_vectors * eigen_val * eigen_vectors.inverse(); } cov_mat_inverse = cov_mat.inverse(); cov(0, 0) = cov_mat(0, 0); cov(0, 1) = cov_mat(0, 1); cov(0, 2) = cov_mat(0, 2); cov(1, 0) = cov_mat(1, 0); cov(1, 1) = cov_mat(1, 1); cov(1, 2) = cov_mat(1, 2); cov(2, 0) = cov_mat(2, 0); cov(2, 1) = cov_mat(2, 1); cov(2, 2) = cov_mat(2, 2); icov(0, 0) = cov_mat_inverse(0, 0); icov(0, 1) = cov_mat_inverse(0, 1); icov(0, 2) = cov_mat_inverse(0, 2); icov(1, 0) = cov_mat_inverse(1, 0); icov(1, 1) = cov_mat_inverse(1, 1); icov(1, 2) = cov_mat_inverse(1, 2); icov(2, 0) = cov_mat_inverse(2, 0); icov(2, 1) = cov_mat_inverse(2, 1); icov(2, 2) = cov_mat_inverse(2, 2); } } void GVoxelGrid::insertPoints() { int block_x = (points_num_ > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : points_num_; int grid_x = (points_num_ - 1) / block_x + 1; hipLaunchKernelGGL(( insertPointsToGrid), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_num_, global_voxel_, vgrid_x_, vgrid_y_, vgrid_z_, voxel_x_, voxel_y_, voxel_z_, min_b_x_, min_b_y_, min_b_z_); int voxel_num = vgrid_x_ * vgrid_y_ * vgrid_z_; block_x = (voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : voxel_num; grid_x = (voxel_num - 1) / block_x + 1; hipLaunchKernelGGL(( updateVoxelCentroid), dim3(grid_x), dim3(block_x), 0, 0, global_voxel_, voxel_num); checkCudaErrors(hipDeviceSynchronize()); } //Input are supposed to be in device memory void GVoxelGrid::setInput(float *x, float *y, float *z, int points_num) { if (points_num <= 0) return; x_ = x; y_ = y; z_ = z; points_num_ = points_num; findBoundaries(); checkCudaErrors(hipMalloc(&global_voxel_, sizeof(GVoxel) * vgrid_x_ * vgrid_y_ * vgrid_z_)); findBoundaries(); initialize(); insertPoints(); } extern "C" __global__ void findMax(float *x, float *y, float *z, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < half_size; i += stride) x[i] = (i + half_size < full_size) ? ((x[i] >= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i]; } extern "C" __global__ void findMin(float *x, float *y, float *z, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < half_size; i += stride) x[i] = (i + half_size < full_size) ? ((x[i] <= x[i + half_size]) ? 
x[i] : x[i + half_size]) : x[i]; } void GVoxelGrid::findBoundaries() { float *max_x, *max_y, *max_z, *min_x, *min_y, *min_z; checkCudaErrors(hipMalloc(&max_x, sizeof(float) * points_num_)); checkCudaErrors(hipMalloc(&max_y, sizeof(float) * points_num_)); checkCudaErrors(hipMalloc(&max_z, sizeof(float) * points_num_)); checkCudaErrors(hipMalloc(&min_x, sizeof(float) * points_num_)); checkCudaErrors(hipMalloc(&min_y, sizeof(float) * points_num_)); checkCudaErrors(hipMalloc(&min_z, sizeof(float) * points_num_)); checkCudaErrors(hipMemcpy(max_x, x_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(max_y, y_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(max_z, z_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(min_x, x_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(min_y, y_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(min_z, z_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice)); int points_num = points_num_; while (points_num > 1) { int half_points_num = (points_num - 1) / 2 + 1; int block_x = (half_points_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : half_points_num; int grid_x = (half_points_num - 1) / block_x + 1; hipLaunchKernelGGL(( findMax), dim3(grid_x), dim3(block_x), 0, 0, max_x, max_y, max_z, points_num, half_points_num); hipLaunchKernelGGL(( findMin), dim3(grid_x), dim3(block_x), 0, 0, min_x, min_y, min_z, points_num, half_points_num); points_num = half_points_num; } checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemcpy(&max_x_, max_x, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&max_y_, max_y, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&max_z_, max_z, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&min_x_, min_x, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&min_y_, min_y, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&min_z_, min_z, sizeof(float), hipMemcpyDeviceToHost)); max_b_x_ = static_cast<int> (floor(max_x_ / voxel_x_)); max_b_y_ = static_cast<int> (floor(max_y_ / voxel_y_)); max_b_z_ = static_cast<int> (floor(max_z_ / voxel_z_)); min_b_x_ = static_cast<int> (floor(min_x_ / voxel_x_)); min_b_y_ = static_cast<int> (floor(min_y_ / voxel_y_)); min_b_z_ = static_cast<int> (floor(min_z_ / voxel_z_)); vgrid_x_ = max_b_x_ - min_b_x_ + 1; vgrid_y_ = max_b_y_ - min_b_y_ + 1; vgrid_z_ = max_b_z_ - min_b_z_ + 1; } __device__ float squareDistance(float x, float y, float z, float a, float b, float c) { return (x - a) * (x - a) + (y - b) * (y - b) + (z - c) * (z - c); } extern "C" __global__ void radiusSearch1(float *x, float *y, float *z, float radius, int max_nn, int points_num, GVoxel *grid, int vgrid_x, int vgrid_y, int vgrid_z, float voxel_x, float voxel_y, float voxel_z, int min_b_x, int min_b_y, int min_b_z, int max_b_x, int max_b_y, int max_b_z, int *max_vid_x, int *max_vid_y, int *max_vid_z, int *min_vid_x, int *min_vid_y, int *min_vid_z, int *found_voxel_num, int *valid_points) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for 
(int i = id; i < points_num; i += stride) { float t_x = x[i]; float t_y = y[i]; float t_z = z[i]; int id_x = static_cast<int>(floorf(t_x / voxel_x) - static_cast<float>(min_b_x)); int id_y = static_cast<int>(floorf(t_y / voxel_y) - static_cast<float>(min_b_y)); int id_z = static_cast<int>(floorf(t_z / voxel_z) - static_cast<float>(min_b_z)); int max_id_x = static_cast<int>(ceilf((t_x + radius) / voxel_x) - static_cast<float>(min_b_x)); int max_id_y = static_cast<int>(ceilf((t_y + radius) / voxel_y) - static_cast<float>(min_b_y)); int max_id_z = static_cast<int>(ceilf((t_z + radius) / voxel_z) - static_cast<float>(min_b_z)); int min_id_x = static_cast<int>(ceilf((t_x - radius) / voxel_x) - static_cast<float>(min_b_x)); int min_id_y = static_cast<int>(ceilf((t_y - radius) / voxel_y) - static_cast<float>(min_b_y)); int min_id_z = static_cast<int>(ceilf((t_z - radius) / voxel_z) - static_cast<float>(min_b_z)); /* Find intersection of the cube containing the * NN sphere of the point and the voxel grid */ max_id_x = (max_id_x > max_b_x) ? max_b_x : max_id_x; max_id_y = (max_id_y > max_b_y) ? max_b_y : max_id_y; max_id_z = (max_id_z > max_b_z) ? max_b_z : max_id_z; min_id_x = (min_id_x < min_b_x) ? min_b_x : min_id_x; min_id_y = (min_id_y < min_b_y) ? min_b_y : min_id_y; min_id_z = (min_id_z < min_b_z) ? min_b_z : min_id_z; int nn = 0; for (int j = min_id_x; j <= max_id_x && nn < max_nn; j++) { for (int k = min_id_y; k <= max_id_y && nn < max_nn; k++) { for (int l = min_id_z; l <= max_id_z && nn < max_nn; l++) { int voxel_id = j + k * vgrid_x + l * vgrid_x * vgrid_y; GVoxel *voxel = grid + voxel_id; int point_num = voxel->pointNum(); float centroid_x = (point_num > 0) ? voxel->centroid()(0) : FLT_MAX; float centroid_y = (point_num > 0) ? voxel->centroid()(1) : FLT_MAX; float centroid_z = (point_num > 0) ? voxel->centroid()(2) : FLT_MAX; nn += (squareDistance(centroid_x, centroid_y, centroid_z, t_x, t_y, t_z) <= radius * radius) ? 
1 : 0; } } } found_voxel_num[i] = nn; valid_points[i] = (nn == 0) ? 0 : 1; max_vid_x[i] = max_id_x; max_vid_y[i] = max_id_y; max_vid_z[i] = max_id_z; min_vid_x[i] = min_id_x; min_vid_y[i] = min_id_y; min_vid_z[i] = min_id_z; } } extern "C" __global__ void collectValidPoints(int *input, int *output, int *writing_location, int size) { for (int index = threadIdx.x + blockIdx.x * blockDim.x; index < size; index += blockDim.x * gridDim.x) { if (input[index] != 0) { output[writing_location[index]] = index; } } } extern "C" __global__ void radiusSearch2(float *x, float *y, float *z, float radius, int max_nn, int points_num, GVoxel *grid, int vgrid_x, int vgrid_y, int vgrid_z, int *max_vid_x, int *max_vid_y, int *max_vid_z, int *min_vid_x, int *min_vid_y, int *min_vid_z, int *found_voxel_num, int *voxel_id) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < points_num; i += stride) { float t_x = x[i]; float t_y = y[i]; float t_z = z[i]; int max_id_x = max_vid_x[i]; int max_id_y = max_vid_y[i]; int max_id_z = max_vid_z[i]; int min_id_x = min_vid_x[i]; int min_id_y = min_vid_y[i]; int min_id_z = min_vid_z[i]; int nn = 0; int write_location = found_voxel_num[i]; int pn = found_voxel_num[i + 1] - found_voxel_num[i]; if (pn == 0) return; for (int j = min_id_x; j <= max_id_x && nn < max_nn; j++) { for (int k = min_id_y; k <= max_id_y && nn < max_nn; k++) { for (int l = min_id_z; l <= max_id_z && nn < max_nn; l++) { int vid = j + k * vgrid_x + l * vgrid_x * vgrid_y; GVoxel *voxel = grid + vid; int point_num = voxel->pointNum(); float centroid_x = (point_num > 0) ? voxel->centroid()(0) : FLT_MAX; float centroid_y = (point_num > 0) ? voxel->centroid()(1) : FLT_MAX; float centroid_z = (point_num > 0) ? 
voxel->centroid()(2) : FLT_MAX; if (squareDistance(centroid_x, centroid_y, centroid_z, t_x, t_y, t_z) <= radius * radius) { voxel_id[write_location] = vid; write_location++; nn++; } } } } } } template <typename T> void GVoxelGrid::ExclusiveScan(T *input, int ele_num, T *sum) { thrust::device_ptr<T> dev_ptr(input); thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr); checkCudaErrors(hipDeviceSynchronize()); *sum = *(dev_ptr + ele_num - 1); } void GVoxelGrid::radiusSearch(float *qx, float *qy, float *qz, int points_num, float radius, int max_nn) { int block_x = (points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_num; int grid_x = (points_num - 1) / block_x + 1; int *max_vid_x, *max_vid_y, *max_vid_z; int *min_vid_x, *min_vid_y, *min_vid_z; int *found_voxel_num; int *valid_point_tmp; checkCudaErrors(hipMalloc(&max_vid_x, sizeof(int) * points_num)); checkCudaErrors(hipMalloc(&max_vid_y, sizeof(int) * points_num)); checkCudaErrors(hipMalloc(&max_vid_z, sizeof(int) * points_num)); checkCudaErrors(hipMalloc(&min_vid_x, sizeof(int) * points_num)); checkCudaErrors(hipMalloc(&min_vid_y, sizeof(int) * points_num)); checkCudaErrors(hipMalloc(&min_vid_z, sizeof(int) * points_num)); checkCudaErrors(hipMalloc(&found_voxel_num, sizeof(int) * (points_num + 1))); checkCudaErrors(hipMalloc(&valid_point_tmp, sizeof(int) * (points_num + 1))); hipLaunchKernelGGL(( radiusSearch1), dim3(grid_x), dim3(block_x), 0, 0, qx, qy, qz, radius, max_nn, points_num, global_voxel_, vgrid_x_, vgrid_y_, vgrid_z_, voxel_x_, voxel_y_, voxel_z_, min_b_x_, min_b_y_, min_b_z_, max_b_x_, max_b_y_, max_b_z_, max_vid_x, max_vid_y, max_vid_z, min_vid_x, min_vid_y, min_vid_z, valid_point_tmp, found_voxel_num); checkCudaErrors(hipDeviceSynchronize()); int *query_status; checkCudaErrors(hipMalloc(&query_status, sizeof(int) * points_num)); checkCudaErrors(hipMemcpy(query_status, valid_point_tmp, sizeof(int) * points_num, hipMemcpyDeviceToDevice)); ExclusiveScan(found_voxel_num, points_num + 1, 
&qresult_size_); starting_voxel_id_ = found_voxel_num; ExclusiveScan(valid_point_tmp, points_num + 1, &valid_points_num_); checkCudaErrors(hipMalloc(&valid_points_, sizeof(int) * valid_points_num_)); checkCudaErrors(hipMalloc(&voxel_id_, sizeof(int) * qresult_size_)); hipLaunchKernelGGL(( collectValidPoints), dim3(grid_x), dim3(block_x), 0, 0, query_status, valid_points_, valid_point_tmp, points_num); hipLaunchKernelGGL(( radiusSearch2), dim3(grid_x), dim3(block_x), 0, 0, qx, qy, qz, radius, max_nn, points_num, global_voxel_, vgrid_x_, vgrid_y_, vgrid_z_, max_vid_x, max_vid_y, max_vid_z, min_vid_x, min_vid_y, min_vid_z, starting_voxel_id_, voxel_id_); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(max_vid_x)); checkCudaErrors(hipFree(max_vid_y)); checkCudaErrors(hipFree(max_vid_z)); checkCudaErrors(hipFree(min_vid_x)); checkCudaErrors(hipFree(min_vid_y)); checkCudaErrors(hipFree(min_vid_z)); checkCudaErrors(hipFree(valid_point_tmp)); } }
3b1de2d27b1b6681785c38a5e7881050b6bc16d7.cu
#include "VoxelGrid.h" #include "debug.h" #include "common.h" #include <math.h> #include <limits> #include <eigen3/Eigen/Eigenvalues> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/scan.h> #include <thrust/fill.h> #include <inttypes.h> namespace gpu { //Copied from cuda c programming guide #if __CUDA_ARCH__ < 600 extern "C" __device__ double atomicAddD(double *address, double val) { unsigned long long int *address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif GVoxelGrid::~GVoxelGrid() { if (global_voxel_ != NULL) checkCudaErrors(cudaFree(global_voxel_)); if (centroid_ != NULL) checkCudaErrors(cudaFree(centroid_)); if (covariance_ != NULL) checkCudaErrors(cudaFree(covariance_)); if (inverse_covariance_ != NULL) checkCudaErrors(cudaFree(inverse_covariance_)); if (valid_points_ != NULL) checkCudaErrors(cudaFree(valid_points_)); if (starting_voxel_id_ != NULL) checkCudaErrors(cudaFree(starting_voxel_id_)); if (voxel_id_ != NULL) checkCudaErrors(cudaFree(voxel_id_)); } extern "C" __global__ void initVoxelGrid(GVoxel *voxel_grid, int vgrid_x, int vgrid_y, int vgrid_z, float min_z, float min_x, float min_y, float voxel_x, float voxel_y, float voxel_z, double *centroid_buff, double *cov_buff, double *inverse_cov_buff) { int id_x = threadIdx.x + blockIdx.x * blockDim.x; int id_y = threadIdx.y + blockIdx.y * blockDim.y; int id_z = threadIdx.z + blockIdx.z * blockDim.z; if (id_x < vgrid_x && id_y < vgrid_y && id_z < vgrid_z) { int vgrid_id = id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y; GVoxel *voxel = voxel_grid + vgrid_id; voxel->minZ() = min_z + id_z * voxel_z; voxel->minX() = min_x + id_x * voxel_x; voxel->minY() = min_y + id_y * voxel_y; voxel->maxZ() = min_z + id_z * 
voxel_z + voxel_z; voxel->maxX() = min_x + id_x * voxel_x + voxel_x; voxel->maxY() = min_y + id_y * voxel_y + voxel_y; voxel->centroid() = MatrixDevice(1, 3, vgrid_x * vgrid_y * vgrid_z, centroid_buff); voxel->covariance() = MatrixDevice(3, 3, vgrid_x * vgrid_y * vgrid_z, cov_buff); voxel->inverseCovariance() = MatrixDevice(3, 3, vgrid_x * vgrid_y * vgrid_z, inverse_cov_buff); voxel->pointNum() = 0; } } void GVoxelGrid::initialize() { checkCudaErrors(cudaMalloc(&centroid_, sizeof(double) * 3 * vgrid_x_ * vgrid_y_ * vgrid_z_)); checkCudaErrors(cudaMalloc(&covariance_, sizeof(double) * 9 * vgrid_x_ * vgrid_y_ * vgrid_z_)); checkCudaErrors(cudaMalloc(&inverse_covariance_, sizeof(double) * 9 * vgrid_x_ * vgrid_y_ * vgrid_z_)); int block_x = (vgrid_x_ > BLOCK_X) ? BLOCK_X : vgrid_x_; int block_y = (vgrid_y_ > BLOCK_Y) ? BLOCK_Y : vgrid_y_; int block_z = (vgrid_z_ > BLOCK_Z) ? BLOCK_Z : vgrid_z_; int grid_x = (vgrid_x_ - 1) / block_x + 1; int grid_y = (vgrid_y_ - 1) / block_y + 1; int grid_z = (vgrid_z_ - 1) / block_z + 1; dim3 block(block_x, block_y, block_z); dim3 grid(grid_x, grid_y, grid_z); initVoxelGrid<<<grid, block>>>(global_voxel_, vgrid_x_, vgrid_y_, vgrid_z_, min_z_, min_x_, min_y_, voxel_x_, voxel_y_, voxel_z_, centroid_, covariance_, inverse_covariance_); checkCudaErrors(cudaDeviceSynchronize()); } __device__ int voxelId(float x, float y, float z, float voxel_x, float voxel_y, float voxel_z, int min_b_x, int min_b_y, int min_b_z, int vgrid_x, int vgrid_y, int vgrid_z) { int id_x = static_cast<int>(floor(x / voxel_x) - static_cast<float>(min_b_x)); int id_y = static_cast<int>(floor(y / voxel_y) - static_cast<float>(min_b_y)); int id_z = static_cast<int>(floor(z / voxel_z) - static_cast<float>(min_b_z)); return (id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y); } /* First pass: insert points to voxels * Number of points, coordinate sum, and initial covariance * matrix in a voxel is calculated by atomicAdd. 
*/ extern "C" __global__ void insertPointsToGrid(float *x, float *y, float *z, int points_num, GVoxel *voxel_grid, int vgrid_x, int vgrid_y, int vgrid_z, float voxel_x, float voxel_y, float voxel_z, int min_b_x, int min_b_y, int min_b_z) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < points_num; i += stride) { float t_x = x[i]; float t_y = y[i]; float t_z = z[i]; int voxel_id = voxelId(t_x, t_y, t_z, voxel_x, voxel_y, voxel_z, min_b_x, min_b_y, min_b_z, vgrid_x, vgrid_y, vgrid_z); GVoxel *voxel = voxel_grid + voxel_id; MatrixDevice centr = voxel->centroid(); MatrixDevice cov = voxel->covariance(); atomicAdd(voxel->pointNumAddress(), 1); atomicAddD(centr.cellAddr(0), t_x); atomicAddD(centr.cellAddr(1), t_y); atomicAddD(centr.cellAddr(2), t_z); atomicAddD(cov.cellAddr(0, 0), t_x * t_x); atomicAddD(cov.cellAddr(0, 1), t_x * t_y); atomicAddD(cov.cellAddr(0, 2), t_x * t_z); atomicAddD(cov.cellAddr(1, 0), t_y * t_x); atomicAddD(cov.cellAddr(1, 1), t_y * t_y); atomicAddD(cov.cellAddr(1, 2), t_y * t_z); atomicAddD(cov.cellAddr(2, 0), t_z * t_x); atomicAddD(cov.cellAddr(2, 1), t_z * t_y); atomicAddD(cov.cellAddr(2, 2), t_z * t_z); } } /* Second pass: update coordinate mean (centroid) and * covariance matrix of each cell */ extern "C" __global__ void updateVoxelCentroid(GVoxel *voxel_grid, int voxel_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int vid = index; vid < voxel_num; vid += stride) { GVoxel *node = voxel_grid + vid; int points_num = node->pointNum(); if (points_num == 0) return; MatrixDevice centr = node->centroid(); MatrixDevice cov = node->covariance(); MatrixDevice icov = node->inverseCovariance(); centr /= points_num; cov /= points_num; cov(0, 0) -= centr(0) * centr(0); cov(0, 1) -= centr(0) * centr(1); cov(0, 2) -= centr(0) * centr(2); cov(1, 0) = cov(0, 1); cov(1, 1) -= centr(1) * centr(1); cov(1, 2) -= centr(1) * centr(2); cov(2, 0) = 
cov(0, 2); cov(2, 1) = cov(1, 2); cov(2, 2) -= centr(2) * centr(2); Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eigensolver; Eigen::Matrix3d eigen_val; Eigen::Vector3d pt_sum; Eigen::Matrix3d cov_mat; Eigen::Matrix3d eigen_vectors; Eigen::Matrix3d cov_mat_inverse; cov_mat(0, 0) = cov(0, 0); cov_mat(0, 1) = cov(0, 1); cov_mat(0, 2) = cov(0, 2); cov_mat(1, 0) = cov(1, 0); cov_mat(1, 1) = cov(1, 1); cov_mat(1, 2) = cov(1, 2); cov_mat(2, 0) = cov(2, 0); cov_mat(2, 1) = cov(2, 1); cov_mat(2, 2) = cov(2, 2); eigensolver.compute(cov_mat); eigen_val = eigensolver.eigenvalues().asDiagonal(); eigen_vectors = eigensolver.eigenvectors(); if (eigen_val(0,0) < 0 || eigen_val(1, 1) < 0 || eigen_val(2, 2) <= 0) { node->pointNum() = -1; return; } float min_eigen_val = eigen_val(2, 2) / 100; if (eigen_val(0, 0) < min_eigen_val) { eigen_val(0, 0) = min_eigen_val; if (eigen_val(1, 1) < min_eigen_val) eigen_val(1, 1) = min_eigen_val; cov_mat = eigen_vectors * eigen_val * eigen_vectors.inverse(); } cov_mat_inverse = cov_mat.inverse(); cov(0, 0) = cov_mat(0, 0); cov(0, 1) = cov_mat(0, 1); cov(0, 2) = cov_mat(0, 2); cov(1, 0) = cov_mat(1, 0); cov(1, 1) = cov_mat(1, 1); cov(1, 2) = cov_mat(1, 2); cov(2, 0) = cov_mat(2, 0); cov(2, 1) = cov_mat(2, 1); cov(2, 2) = cov_mat(2, 2); icov(0, 0) = cov_mat_inverse(0, 0); icov(0, 1) = cov_mat_inverse(0, 1); icov(0, 2) = cov_mat_inverse(0, 2); icov(1, 0) = cov_mat_inverse(1, 0); icov(1, 1) = cov_mat_inverse(1, 1); icov(1, 2) = cov_mat_inverse(1, 2); icov(2, 0) = cov_mat_inverse(2, 0); icov(2, 1) = cov_mat_inverse(2, 1); icov(2, 2) = cov_mat_inverse(2, 2); } } void GVoxelGrid::insertPoints() { int block_x = (points_num_ > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : points_num_; int grid_x = (points_num_ - 1) / block_x + 1; insertPointsToGrid<<<grid_x, block_x>>>(x_, y_, z_, points_num_, global_voxel_, vgrid_x_, vgrid_y_, vgrid_z_, voxel_x_, voxel_y_, voxel_z_, min_b_x_, min_b_y_, min_b_z_); int voxel_num = vgrid_x_ * vgrid_y_ * vgrid_z_; block_x = (voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : voxel_num; grid_x = (voxel_num - 1) / block_x + 1; updateVoxelCentroid<<<grid_x, block_x>>>(global_voxel_, voxel_num); checkCudaErrors(cudaDeviceSynchronize()); } //Input are supposed to be in device memory void GVoxelGrid::setInput(float *x, float *y, float *z, int points_num) { if (points_num <= 0) return; x_ = x; y_ = y; z_ = z; points_num_ = points_num; findBoundaries(); checkCudaErrors(cudaMalloc(&global_voxel_, sizeof(GVoxel) * vgrid_x_ * vgrid_y_ * vgrid_z_)); findBoundaries(); initialize(); insertPoints(); } extern "C" __global__ void findMax(float *x, float *y, float *z, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < half_size; i += stride) x[i] = (i + half_size < full_size) ? ((x[i] >= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i]; } extern "C" __global__ void findMin(float *x, float *y, float *z, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < half_size; i += stride) x[i] = (i + half_size < full_size) ? ((x[i] <= x[i + half_size]) ? 
x[i] : x[i + half_size]) : x[i]; } void GVoxelGrid::findBoundaries() { float *max_x, *max_y, *max_z, *min_x, *min_y, *min_z; checkCudaErrors(cudaMalloc(&max_x, sizeof(float) * points_num_)); checkCudaErrors(cudaMalloc(&max_y, sizeof(float) * points_num_)); checkCudaErrors(cudaMalloc(&max_z, sizeof(float) * points_num_)); checkCudaErrors(cudaMalloc(&min_x, sizeof(float) * points_num_)); checkCudaErrors(cudaMalloc(&min_y, sizeof(float) * points_num_)); checkCudaErrors(cudaMalloc(&min_z, sizeof(float) * points_num_)); checkCudaErrors(cudaMemcpy(max_x, x_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(max_y, y_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(max_z, z_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(min_x, x_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(min_y, y_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(min_z, z_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice)); int points_num = points_num_; while (points_num > 1) { int half_points_num = (points_num - 1) / 2 + 1; int block_x = (half_points_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : half_points_num; int grid_x = (half_points_num - 1) / block_x + 1; findMax<<<grid_x, block_x>>>(max_x, max_y, max_z, points_num, half_points_num); findMin<<<grid_x, block_x>>>(min_x, min_y, min_z, points_num, half_points_num); points_num = half_points_num; } checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(&max_x_, max_x, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&max_y_, max_y, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&max_z_, max_z, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&min_x_, min_x, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&min_y_, min_y, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&min_z_, min_z, sizeof(float), cudaMemcpyDeviceToHost)); max_b_x_ = static_cast<int> (floor(max_x_ / voxel_x_)); max_b_y_ = static_cast<int> (floor(max_y_ / voxel_y_)); max_b_z_ = static_cast<int> (floor(max_z_ / voxel_z_)); min_b_x_ = static_cast<int> (floor(min_x_ / voxel_x_)); min_b_y_ = static_cast<int> (floor(min_y_ / voxel_y_)); min_b_z_ = static_cast<int> (floor(min_z_ / voxel_z_)); vgrid_x_ = max_b_x_ - min_b_x_ + 1; vgrid_y_ = max_b_y_ - min_b_y_ + 1; vgrid_z_ = max_b_z_ - min_b_z_ + 1; } __device__ float squareDistance(float x, float y, float z, float a, float b, float c) { return (x - a) * (x - a) + (y - b) * (y - b) + (z - c) * (z - c); } extern "C" __global__ void radiusSearch1(float *x, float *y, float *z, float radius, int max_nn, int points_num, GVoxel *grid, int vgrid_x, int vgrid_y, int vgrid_z, float voxel_x, float voxel_y, float voxel_z, int min_b_x, int min_b_y, int min_b_z, int max_b_x, int max_b_y, int max_b_z, int *max_vid_x, int *max_vid_y, int *max_vid_z, int *min_vid_x, int *min_vid_y, int *min_vid_z, int *found_voxel_num, int *valid_points) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < points_num; i += stride) { float t_x = 
x[i]; float t_y = y[i]; float t_z = z[i]; int id_x = static_cast<int>(floorf(t_x / voxel_x) - static_cast<float>(min_b_x)); int id_y = static_cast<int>(floorf(t_y / voxel_y) - static_cast<float>(min_b_y)); int id_z = static_cast<int>(floorf(t_z / voxel_z) - static_cast<float>(min_b_z)); int max_id_x = static_cast<int>(ceilf((t_x + radius) / voxel_x) - static_cast<float>(min_b_x)); int max_id_y = static_cast<int>(ceilf((t_y + radius) / voxel_y) - static_cast<float>(min_b_y)); int max_id_z = static_cast<int>(ceilf((t_z + radius) / voxel_z) - static_cast<float>(min_b_z)); int min_id_x = static_cast<int>(ceilf((t_x - radius) / voxel_x) - static_cast<float>(min_b_x)); int min_id_y = static_cast<int>(ceilf((t_y - radius) / voxel_y) - static_cast<float>(min_b_y)); int min_id_z = static_cast<int>(ceilf((t_z - radius) / voxel_z) - static_cast<float>(min_b_z)); /* Find intersection of the cube containing the * NN sphere of the point and the voxel grid */ max_id_x = (max_id_x > max_b_x) ? max_b_x : max_id_x; max_id_y = (max_id_y > max_b_y) ? max_b_y : max_id_y; max_id_z = (max_id_z > max_b_z) ? max_b_z : max_id_z; min_id_x = (min_id_x < min_b_x) ? min_b_x : min_id_x; min_id_y = (min_id_y < min_b_y) ? min_b_y : min_id_y; min_id_z = (min_id_z < min_b_z) ? min_b_z : min_id_z; int nn = 0; for (int j = min_id_x; j <= max_id_x && nn < max_nn; j++) { for (int k = min_id_y; k <= max_id_y && nn < max_nn; k++) { for (int l = min_id_z; l <= max_id_z && nn < max_nn; l++) { int voxel_id = j + k * vgrid_x + l * vgrid_x * vgrid_y; GVoxel *voxel = grid + voxel_id; int point_num = voxel->pointNum(); float centroid_x = (point_num > 0) ? voxel->centroid()(0) : FLT_MAX; float centroid_y = (point_num > 0) ? voxel->centroid()(1) : FLT_MAX; float centroid_z = (point_num > 0) ? voxel->centroid()(2) : FLT_MAX; nn += (squareDistance(centroid_x, centroid_y, centroid_z, t_x, t_y, t_z) <= radius * radius) ? 1 : 0; } } } found_voxel_num[i] = nn; valid_points[i] = (nn == 0) ? 
0 : 1; max_vid_x[i] = max_id_x; max_vid_y[i] = max_id_y; max_vid_z[i] = max_id_z; min_vid_x[i] = min_id_x; min_vid_y[i] = min_id_y; min_vid_z[i] = min_id_z; } } extern "C" __global__ void collectValidPoints(int *input, int *output, int *writing_location, int size) { for (int index = threadIdx.x + blockIdx.x * blockDim.x; index < size; index += blockDim.x * gridDim.x) { if (input[index] != 0) { output[writing_location[index]] = index; } } } extern "C" __global__ void radiusSearch2(float *x, float *y, float *z, float radius, int max_nn, int points_num, GVoxel *grid, int vgrid_x, int vgrid_y, int vgrid_z, int *max_vid_x, int *max_vid_y, int *max_vid_z, int *min_vid_x, int *min_vid_y, int *min_vid_z, int *found_voxel_num, int *voxel_id) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < points_num; i += stride) { float t_x = x[i]; float t_y = y[i]; float t_z = z[i]; int max_id_x = max_vid_x[i]; int max_id_y = max_vid_y[i]; int max_id_z = max_vid_z[i]; int min_id_x = min_vid_x[i]; int min_id_y = min_vid_y[i]; int min_id_z = min_vid_z[i]; int nn = 0; int write_location = found_voxel_num[i]; int pn = found_voxel_num[i + 1] - found_voxel_num[i]; if (pn == 0) return; for (int j = min_id_x; j <= max_id_x && nn < max_nn; j++) { for (int k = min_id_y; k <= max_id_y && nn < max_nn; k++) { for (int l = min_id_z; l <= max_id_z && nn < max_nn; l++) { int vid = j + k * vgrid_x + l * vgrid_x * vgrid_y; GVoxel *voxel = grid + vid; int point_num = voxel->pointNum(); float centroid_x = (point_num > 0) ? voxel->centroid()(0) : FLT_MAX; float centroid_y = (point_num > 0) ? voxel->centroid()(1) : FLT_MAX; float centroid_z = (point_num > 0) ? 
voxel->centroid()(2) : FLT_MAX; if (squareDistance(centroid_x, centroid_y, centroid_z, t_x, t_y, t_z) <= radius * radius) { voxel_id[write_location] = vid; write_location++; nn++; } } } } } } template <typename T> void GVoxelGrid::ExclusiveScan(T *input, int ele_num, T *sum) { thrust::device_ptr<T> dev_ptr(input); thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr); checkCudaErrors(cudaDeviceSynchronize()); *sum = *(dev_ptr + ele_num - 1); } void GVoxelGrid::radiusSearch(float *qx, float *qy, float *qz, int points_num, float radius, int max_nn) { int block_x = (points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_num; int grid_x = (points_num - 1) / block_x + 1; int *max_vid_x, *max_vid_y, *max_vid_z; int *min_vid_x, *min_vid_y, *min_vid_z; int *found_voxel_num; int *valid_point_tmp; checkCudaErrors(cudaMalloc(&max_vid_x, sizeof(int) * points_num)); checkCudaErrors(cudaMalloc(&max_vid_y, sizeof(int) * points_num)); checkCudaErrors(cudaMalloc(&max_vid_z, sizeof(int) * points_num)); checkCudaErrors(cudaMalloc(&min_vid_x, sizeof(int) * points_num)); checkCudaErrors(cudaMalloc(&min_vid_y, sizeof(int) * points_num)); checkCudaErrors(cudaMalloc(&min_vid_z, sizeof(int) * points_num)); checkCudaErrors(cudaMalloc(&found_voxel_num, sizeof(int) * (points_num + 1))); checkCudaErrors(cudaMalloc(&valid_point_tmp, sizeof(int) * (points_num + 1))); radiusSearch1<<<grid_x, block_x>>>(qx, qy, qz, radius, max_nn, points_num, global_voxel_, vgrid_x_, vgrid_y_, vgrid_z_, voxel_x_, voxel_y_, voxel_z_, min_b_x_, min_b_y_, min_b_z_, max_b_x_, max_b_y_, max_b_z_, max_vid_x, max_vid_y, max_vid_z, min_vid_x, min_vid_y, min_vid_z, valid_point_tmp, found_voxel_num); checkCudaErrors(cudaDeviceSynchronize()); int *query_status; checkCudaErrors(cudaMalloc(&query_status, sizeof(int) * points_num)); checkCudaErrors(cudaMemcpy(query_status, valid_point_tmp, sizeof(int) * points_num, cudaMemcpyDeviceToDevice)); ExclusiveScan(found_voxel_num, points_num + 1, &qresult_size_); starting_voxel_id_ = 
found_voxel_num; ExclusiveScan(valid_point_tmp, points_num + 1, &valid_points_num_); checkCudaErrors(cudaMalloc(&valid_points_, sizeof(int) * valid_points_num_)); checkCudaErrors(cudaMalloc(&voxel_id_, sizeof(int) * qresult_size_)); collectValidPoints<<<grid_x, block_x>>>(query_status, valid_points_, valid_point_tmp, points_num); radiusSearch2<<<grid_x, block_x>>>(qx, qy, qz, radius, max_nn, points_num, global_voxel_, vgrid_x_, vgrid_y_, vgrid_z_, max_vid_x, max_vid_y, max_vid_z, min_vid_x, min_vid_y, min_vid_z, starting_voxel_id_, voxel_id_); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(max_vid_x)); checkCudaErrors(cudaFree(max_vid_y)); checkCudaErrors(cudaFree(max_vid_z)); checkCudaErrors(cudaFree(min_vid_x)); checkCudaErrors(cudaFree(min_vid_y)); checkCudaErrors(cudaFree(min_vid_z)); checkCudaErrors(cudaFree(valid_point_tmp)); } }