serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
11,701
#include <cuda.h>

// (A dead, fully commented-out earlier single-kernel variant `step_forward`
//  previously lived here; it is superseded by step_forward1/step_forward2
//  below and was removed as dead code.)

/*
 * Forward pass of one fully-connected layer with sigmoid activation.
 *
 * step_forward1<block_size>: blockIdx.z selects the output neuron i; each
 * x/y block reduces up to block_size products a[j]*weight[i][j] in shared
 * memory. With a single x/y block the finished sum (plus bias, then sigmoid)
 * is written to z[i] / a[i]; with several blocks each writes its partial sum
 * to z[layer_size*chunk + i] and step_forward2 finishes the reduction.
 *
 * NOTE(review): every reduction stage is followed by __syncthreads(), so no
 * implicit warp-synchrony is relied upon (correct, if conservative).
 */
template <unsigned int block_size>
__global__ void step_forward1(float *weight, float *bias, float *a, float *z,
                              unsigned int prev_layer_size, unsigned int layer_size)
{
    unsigned int tid = blockDim.x*threadIdx.y + threadIdx.x;                  // lane within block
    unsigned int j = (gridDim.x*blockIdx.y + blockIdx.x)*block_size + tid;    // column (input index)
    unsigned int i = blockIdx.z;                                              // row (output neuron)
    __shared__ float partial_sums[block_size];
    // One product per thread; zero-pad past the end of the previous layer.
    if (j<prev_layer_size) { partial_sums[tid] = a[j]*weight[i*prev_layer_size+j]; }
    else { partial_sums[tid] = 0; }
    __syncthreads();
    // Power-of-two tree reduction; the outer guards are compile-time constants.
    if (block_size > 512) { if (tid < 512) { partial_sums[tid] += partial_sums[tid+512]; } __syncthreads(); }
    if (block_size > 256) { if (tid < 256) { partial_sums[tid] += partial_sums[tid+256]; } __syncthreads(); }
    if (block_size > 128) { if (tid < 128) { partial_sums[tid] += partial_sums[tid+128]; } __syncthreads(); }
    if (block_size >  64) { if (tid <  64) { partial_sums[tid] += partial_sums[tid+ 64]; } __syncthreads(); }
    if (block_size >  32) { if (tid <  32) { partial_sums[tid] += partial_sums[tid+ 32]; } __syncthreads(); }
    if (block_size >  16) { if (tid <  16) { partial_sums[tid] += partial_sums[tid+ 16]; } __syncthreads(); }
    if (block_size >   8) { if (tid <   8) { partial_sums[tid] += partial_sums[tid+  8]; } __syncthreads(); }
    if (block_size >   4) { if (tid <   4) { partial_sums[tid] += partial_sums[tid+  4]; } __syncthreads(); }
    if (block_size >   2) { if (tid <   2) { partial_sums[tid] += partial_sums[tid+  2]; } __syncthreads(); }
    if (tid < 1) {
        if (gridDim.x*gridDim.y == 1) {
            // Single chunk: finish here (add bias, apply sigmoid).
            partial_sums[0] += partial_sums[1]+bias[i];
            z[i] = partial_sums[0];
            a[i] = 1/(1+expf(-partial_sums[0]));
        } else {
            // Multiple chunks: stash this chunk's partial sum for step_forward2.
            partial_sums[0] += partial_sums[1];
            z[layer_size*(gridDim.x*blockIdx.y + blockIdx.x)+i] = partial_sums[0];
        }
    }
}

/*
 * step_forward2<block_size>: second reduction pass. Thread tid of the single
 * block for neuron i (blockIdx.z) loads the partial sum of chunk tid from
 * z[tid*layer_size + i]; the tree reduction then produces the final
 * pre-activation z[i] and activation a[i]. block_size must equal the number
 * of chunk slots written by step_forward1 (all slots are written, zero-padded
 * chunks included).
 */
template <unsigned int block_size>
__global__ void step_forward2(float *z, float *bias, float *a, unsigned int layer_size)
{
    unsigned int tid = blockDim.x*threadIdx.y + threadIdx.x;
    unsigned int i = blockIdx.z;  // output neuron
    __shared__ float partial_sums[block_size];
    partial_sums[tid] = z[tid*layer_size + i];
    __syncthreads();
    if (block_size > 32) { if (tid < 32) { partial_sums[tid] += partial_sums[tid+ 32]; } __syncthreads(); }
    if (block_size > 16) { if (tid < 16) { partial_sums[tid] += partial_sums[tid+ 16]; } __syncthreads(); }
    if (block_size >  8) { if (tid <  8) { partial_sums[tid] += partial_sums[tid+  8]; } __syncthreads(); }
    if (block_size >  4) { if (tid <  4) { partial_sums[tid] += partial_sums[tid+  4]; } __syncthreads(); }
    if (block_size >  2) { if (tid <  2) { partial_sums[tid] += partial_sums[tid+  2]; } __syncthreads(); }
    if (tid < 1) {
        partial_sums[0] += partial_sums[1] + bias[i];
        z[i] = partial_sums[0];
        a[i] = 1/(1+expf(-partial_sums[0]));
    }
}

/*
 * Host-side launcher. Picks block/grid shape and the template instantiation
 * from the previous layer's width, padding up to the next power of two.
 *
 * FIX: the original branch conditions used floor division
 * (prev_layer_size/1024 > K). That launched NOTHING for sizes 1025..2047 and
 * an under-sized grid for e.g. 2049..3071 (2 chunks cover only 2048 inputs),
 * silently dropping part of the dot product. Ceil division fixes every
 * boundary while preserving the power-of-two grid design.
 *
 * Limitations (unchanged from the original design): prev_layer_size must be
 * <= 65536 (at most 64 chunks), and prev_layer_size == 1 launches nothing.
 * For the multi-chunk path, z must hold layer_size * <chunk slots> floats.
 */
void step_forward_wrapper(float *weight, float *bias, float *a, float *z,
                          unsigned int prev_layer_size, unsigned int layer_size)
{
    dim3 grid(1, 1, layer_size);
    dim3 block(1, 1, 1);
    // Number of 1024-wide chunks needed to cover the previous layer (ceil).
    unsigned int chunks = (prev_layer_size + 1023) / 1024;
    if (prev_layer_size > 1024) {
        // Two-pass path: step_forward1 produces per-chunk partials,
        // step_forward2 folds them.
        block.x = 32; block.y = 32;
        if (chunks > 32) { // up to 64 chunks: prev_layer_size <= 65536 (~65535 limit)
            grid.x = 8; grid.y = 8;
            step_forward1<1024><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
            block.x = 8; block.y = 8; grid.x = 1; grid.y = 1;
            step_forward2<64><<<grid, block>>>(z, bias, a, layer_size);
        } else if (chunks > 16) {
            grid.x = 8; grid.y = 4;
            step_forward1<1024><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
            block.x = 8; block.y = 4; grid.x = 1; grid.y = 1;
            step_forward2<32><<<grid, block>>>(z, bias, a, layer_size);
        } else if (chunks > 8) {
            grid.x = 4; grid.y = 4;
            step_forward1<1024><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
            block.x = 4; block.y = 4; grid.x = 1; grid.y = 1;
            step_forward2<16><<<grid, block>>>(z, bias, a, layer_size);
        } else if (chunks > 4) {
            grid.x = 4; grid.y = 2;
            step_forward1<1024><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
            block.x = 4; block.y = 2; grid.x = 1; grid.y = 1;
            step_forward2<8><<<grid, block>>>(z, bias, a, layer_size);
        } else if (chunks > 2) {
            grid.x = 2; grid.y = 2;
            step_forward1<1024><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
            block.x = 2; block.y = 2; grid.x = 1; grid.y = 1;
            step_forward2<4><<<grid, block>>>(z, bias, a, layer_size);
        } else { // chunks == 2 (prev_layer_size in 1025..2048)
            grid.x = 2; grid.y = 1;
            step_forward1<1024><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
            block.x = 2; block.y = 1; grid.x = 1; grid.y = 1;
            step_forward2<2><<<grid, block>>>(z, bias, a, layer_size);
        }
    // Single-pass path: one block covers the whole previous layer (zero-padded).
    } else if (prev_layer_size > 512) {
        block.x = 32; block.y = 32;
        step_forward1<1024><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
    } else if (prev_layer_size > 256) {
        block.x = 32; block.y = 16;
        step_forward1<512><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
    } else if (prev_layer_size > 128) {
        block.x = 16; block.y = 16;
        step_forward1<256><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
    } else if (prev_layer_size > 64) {
        block.x = 16; block.y = 8;
        step_forward1<128><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
    } else if (prev_layer_size > 32) {
        block.x = 8; block.y = 8;
        step_forward1<64><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
    } else if (prev_layer_size > 16) {
        block.x = 8; block.y = 4;
        step_forward1<32><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
    } else if (prev_layer_size > 8) {
        block.x = 4; block.y = 4;
        step_forward1<16><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
    } else if (prev_layer_size > 4) {
        block.x = 4; block.y = 2;
        step_forward1<8><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
    } else if (prev_layer_size > 2) {
        block.x = 2; block.y = 2;
        step_forward1<4><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
    } else if (prev_layer_size > 1) {
        block.x = 2; block.y = 1;
        step_forward1<2><<<grid, block>>>(weight, bias, a, z, prev_layer_size, layer_size);
    }
    // NOTE(review): prev_layer_size == 1 still launches nothing, as in the
    // original — callers apparently never pass a one-neuron previous layer.
}
11,702
#include "includes.h" __global__ void imageNormalizationKernel(float3 *ptr, int width, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= width || y >= height) { return; } float3 color = ptr[y * width + x]; color.x = (color.x - 127.5) * 0.0078125; color.y = (color.y - 127.5) * 0.0078125; color.z = (color.z - 127.5) * 0.0078125; ptr[y * width + x] = make_float3(color.x, color.y, color.z); }
11,703
//--blockDim=1024 --gridDim=1024 /* * A kernel that exhibits barrier divergence. * Although when executing this kernel all * threads will reach *some* barrier, they * will not all reach the *same* barrier * which is what is required in CUDA. */ __global__ void diverge(/* no inputs or outputs in this illustrative example */) { int tid = threadIdx.x; if (tid == 0) { __syncthreads(); } else { __syncthreads(); } }
11,704
#include <iostream> #include <cstdlib> using namespace std; int rand(); float RandomNumber(float Min, float Max) { return ((float(rand()) / float(RAND_MAX)) * (Max - Min)) + Min; } float sigmoid(float x){ return 1 / (1 + exp(-x)); } float sigmoid_der(float x){ return sigmoid(x) * (1 - sigmoid(x)); } float * dot_matrix(float m1[3], float m2[3]){ // dot product code will be here // m = matrik indeks 1 static float C[1]; C[0] = 0; for (int j = 0; j < 3; j++){ C[0] += m1[j] * m2[j]; } return C; } int main(){ cout << "Neural Network Start" << endl; float feature_set[5][3] = {{0,1,0},{0,0,1},{1,0,0},{1,1,0},{1,1,1}}; float label[5][1] = {{1},{0},{0},{1},{1}}; float *inputs; float suminput; float activation1; int ri; float error, dcost_dpred, dpred_dz, z_delta; float weight[3]; float bias[1][1]; float learning_rate = 0.005; // filling weight with random number for(int i = 0; i < 3; i++){ weight[i] = RandomNumber(-1, 1); } // Training Phase int epoch = 1000; for(int i = 0; i < epoch; i++){ ri = rand() % 5; inputs = dot_matrix(feature_set[ri], weight); for(int j = 0; j < 5; j++) suminput += inputs[j]; // suminput += bias[0][0]; activation1 = sigmoid(suminput); error = activation1 - label[ri][0]; dcost_dpred = error; dpred_dz = sigmoid_der(activation1); z_delta = dcost_dpred * dpred_dz; for(int j = 0; j < 3; j++){ weight[j] -= (learning_rate * inputs[j] * z_delta); } // bias -= learning_rate * z_delta; cout << i <<" activation : " << activation1 << " Data Ke : " << ri << " error : " << error << endl; } return 0; }
11,705
#include <stdio.h> #include <ctime> #include "cuda_runtime.h" #define array_len 200 __global__ void add(int *a, int *b, int *c) { c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; } int main() { int *a, *b, *c; int *d_a, *d_b, *d_c; int size = sizeof(int) * array_len; a = (int*)malloc(size); b = new int[array_len]; c = new int[array_len]; cudaMalloc((void **)&d_a, size); cudaMalloc((void **)&d_b, size); cudaMalloc((void **)&d_c, size); std::srand(std::time(0)); for(int i = 0; i < array_len; i++) { a[i] = 1 + std::rand() % 100; b[i] = 1 + std::rand() % 100; } cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); add<<<array_len,1>>>(d_a, d_b, d_c); cudaDeviceSynchronize(); cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); printf("Calculated\n"); for(int i = 0; i < array_len; i++) printf("%-3d + %-3d = %-3d\n", a[i], b[i], c[i]); free(a); free(b); free(c); return 0; }
11,706
#include<stdio.h> #include<cuda.h> #define ROW 100 #define COL 1000 //Check Error #define printError(func) \ { \ cudaError_t E = func; \ if(E != cudaSuccess) \ { \ printf( "\nError at line: %d ", __LINE__); \ printf( "\nError: %s ", cudaGetErrorString(E)); \ } \ } \ //Kernel __global__ void add(int A[][COL], int B[][COL], int C[][COL]) { unsigned int x = blockDim.x * blockIdx.x + threadIdx.x; unsigned int y = blockDim.y * blockIdx.y + threadIdx.y; if( x < ROW && y < COL ) C[x][y] = B[x][y] + A[x][y]; } //To check the output to see if it matches int checkSum(int A[][COL], int B[][COL], int C[][COL]) { for(int i = 0; i<ROW; i++) for(int j = 0; j<COL; j++) if(C[i][j] != A[i][j] + B[i][j]) return 0; return 1; } int main() { int A[ROW][COL]; int B[ROW][COL]; int C[ROW][COL]; int (*deviceA)[COL]; int (*deviceB)[COL]; int (*deviceC)[COL]; for(int i=0; i<ROW; i++) { for(int j=0; j<COL; j++) { A[i][j] = rand()%1000; B[i][j] = rand()%1000; } } printError(cudaMalloc((void **)&deviceA, ROW * COL * sizeof(int))); printError(cudaMalloc((void **)&deviceB, ROW * COL * sizeof(int))); printError(cudaMalloc((void **)&deviceC, ROW * COL * sizeof(int))); cudaMemcpy(deviceA, A, ROW * COL * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceB, B, ROW * COL * sizeof(int), cudaMemcpyHostToDevice); dim3 local_size(8, 8); dim3 global_size(ceil(ROW/8.0), ceil(COL/8.0)); add<<<global_size, local_size>>>(deviceA, deviceB, deviceC); cudaMemcpy(C, deviceC, ROW * COL * sizeof(int), cudaMemcpyDeviceToHost); /* for(int i=0; i<ROW; i++) { for(int j=0; j<COL; j++) { printf("%d : %d, ", A[i][j] + B[i][j], C[i][j]); } printf("\n"); } */ if(checkSum(A, B, C)) printf("\nResult of 2 matrix sum is correct\n"); else printf("\nResult of 2 matrix sum is wrong\n"); cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); }
11,707
#include "includes.h" // INCLUDES // CUDA // GIS /** * PARS */ #define BLOCK_DIM_small 64 #define BLOCK_DIM 256 static const unsigned int threads = 512; bool print_intermediate_arrays = false; const char *BASE_PATH = "/home/giuliano/git/cuda/reduction"; /* * kernel labels */ const char *kern_0 = "filter_roi"; const char *kern_1 = "imperviousness_change_histc_sh_4" ; const char *kern_2 = "imperviousness_change" ; char buffer[255]; /* * DEFINE I/O files */ // I/– //const char *FIL_ROI = "/home/giuliano/git/cuda/reduction/data/ROI.tif"; //const char *FIL_BIN1 = "/home/giuliano/git/cuda/reduction/data/BIN1.tif"; //const char *FIL_BIN2 = "/home/giuliano/git/cuda/reduction/data/BIN2.tif"; const char *FIL_ROI = "/media/DATI/db-backup/ssgci-data/testing/ssgci_roi.tif"; const char *FIL_BIN1 = "/media/DATI/db-backup/ssgci-data/testing/ssgci_bin.tif"; const char *FIL_BIN2 = "/media/DATI/db-backup/ssgci-data/testing/ssgci_bin2.tif"; // –/O const char *FIL_LTAKE_grid = "/home/giuliano/git/cuda/reduction/data/LTAKE_map.tif"; const char *FIL_LTAKE_count= "/home/giuliano/git/cuda/reduction/data/LTAKE_count.txt"; /* +++++DEFINEs+++++ */ __global__ void imperviousness_change_large( const unsigned char *dev_BIN1, const unsigned char *dev_BIN2, unsigned int WIDTH, unsigned int HEIGHT, int *dev_LTAKE_map, int mapel_per_thread ) { unsigned long int x = threadIdx.x; unsigned long int bdx = blockDim.x; unsigned long int bix = blockIdx.x; //unsigned long int gdx = gridDim.x; unsigned long int tid = bdx*bix + x; // offset unsigned long int tix = tid * mapel_per_thread; // offset //extern __shared__ int sh_diff[]; if( bdx*bix*mapel_per_thread < WIDTH*HEIGHT ){ //sh_diff[tid] = 0; syncthreads(); for(long int ii=0;ii<mapel_per_thread;ii++){ if( tix+ii < WIDTH*HEIGHT ){ //sh_diff[tid] = (int)((int)dev_BIN2[tix+ii] - (int)dev_BIN1[tix+ii]); dev_LTAKE_map[tix+ii] = (int)((int)dev_BIN2[tix+ii] - (int)dev_BIN1[tix+ii]); } //__syncthreads(); //dev_LTAKE_map[tix+ii] = sh_diff[tid]; } } }
11,708
#include "includes.h" __global__ void VectorMultiplicationKernel(int *array, int arrayCount) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < arrayCount) { array[idx] *= array[idx]; } }
11,709
#include <sys/time.h>
#include <cuda.h>
#include <stdio.h>

// time stamp function in seconds (wall clock via gettimeofday)
double getTimeStamp() {
    struct timeval tv ;
    gettimeofday( &tv, NULL ) ;
    return (double) tv.tv_usec/1000000 + tv.tv_sec ;
}

// host side matrix addition: C = A + B, row-major nx rows x ny cols
void h_addmat(float *A, float *B, float *C, int nx, int ny){
    for (int i =0;i<nx;i++){
        for(int j=0;j<ny;j++){
            C[i*ny+j] = A[i*ny+j]+B[i*ny+j];
        }
    }
    return;
}

// device-side matrix addition: one thread per element; ix indexes rows,
// iy columns, guarded against the grid overshooting the matrix.
__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
    int ix = threadIdx.x + blockIdx.x*(blockDim.x) ;
    int iy = threadIdx.y + blockIdx.y*(blockDim.y) ;
    int idx = ix*ny + iy ; // row-major flat index
    if( (ix<nx) && (iy<ny) ){
        C[idx] = A[idx] + B[idx] ;
        //printf("Thread %d %d\n",ix,iy);
    }
}

// Fills M (x rows, y cols, row stride `width`) with deterministic values;
// flag selects one of two fill patterns so A and B differ.
void initData(float *M, int x, int y, int width, int flag ){
    if(flag) {
        //printf("A\n");
        for (int i=0;i<x;i++){
            for (int j=0;j<y;j++){
                M[i*width+j] = (float)(i+j)/3.0;
            }
        }
    }
    else {
        //printf("B\n");
        for (int i=0;i<x;i++){
            for (int j=0;j<y;j++){
                M[i*width+j] = (float)3.14*(i+j) ;
            }
        }
    }
}

// Usage: prog nx ny — times H2D copy, kernel, and D2H copy for an nx x ny
// float matrix add, and verifies the GPU result against the CPU reference.
int main( int argc, char *argv[] ) {
    if (argc!=3){
        printf("Error: Invalid number of arguments.\n");
        exit(1);
    }
    int nx = atoi( argv[1] ) ; // should check validity
    int ny = atoi( argv[2] ) ; // should check validity
    if(nx <=0 || ny <=0){
        printf("Error: Dimension lessThanOrEqualto Zero.\n");
        exit(1);
    }
    // my is leftover column-padding scaffolding; the padding experiments are
    // commented out below, so my stays 0 and ny+my == ny throughout.
    int my=0;
    /* if((ny%16) != 0){ my = 16 - (ny%16); } */
    int noElems = (nx)*(ny+my) ;
    /* if (nx%32 == 0){ noElems = (nx+1)*(ny) ; } */
    int bytes = noElems * sizeof(float) ;
    //printf ("%d %d %d %d \n",(nx*ny),(noElems),mx,my);

    // GPU and CPU memory Allocations
    float *d_A, *d_B, *d_C ;
    cudaMalloc( (void **) &d_A, bytes ) ;
    cudaMalloc( (void **) &d_B, bytes ) ;
    cudaMalloc( (void **) &d_C, bytes ) ;
    float *h_hC = (float *) malloc( bytes ) ; // host result
    // Pinned host buffers for faster, async-capable transfers.
    float *h_Ap, *h_Bp, *h_dCp;
    cudaMallocHost( (float **) &h_Ap, bytes ) ;
    cudaMallocHost( (float **) &h_Bp, bytes ) ;
    cudaMallocHost( (float **) &h_dCp, bytes ) ;
    //cudaMemset(h_Ap,0,bytes);
    //cudaMemset(h_Bp,0,bytes);
    //cudaMemset(h_dCp,0,bytes);

    // init matrices with random data
    initData(h_Ap,nx,ny,ny+my,1);
    initData(h_Bp,nx,ny,ny+my,0);

    double timeStampA = getTimeStamp() ;

    //transfer data to dev
    cudaMemcpy( d_A, h_Ap, bytes, cudaMemcpyHostToDevice ) ;
    cudaMemcpy( d_B, h_Bp, bytes, cudaMemcpyHostToDevice ) ;

    double timeStampB = getTimeStamp() ;

    // invoke Kernel
    dim3 block( 16, 16) ; // you will want to configure this
    dim3 grid( (nx+block.x-1)/block.x, (ny+block.y-1)/block.y) ;
    //printf("Grid %d %d \n",(nx+block.x-1)/block.x,(ny+my)/block.y);
    f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny+my ) ;
    cudaDeviceSynchronize() ;

    double timeStampC = getTimeStamp() ;

    //copy data back
    cudaMemcpy(h_dCp, d_C, bytes, cudaMemcpyDeviceToHost);

    double timeStampD = getTimeStamp() ;

    // free GPU resources
    cudaFree( d_A ) ;
    cudaFree( d_B ) ;
    cudaFree( d_C ) ;

    // CPU Matrix add
    h_addmat( h_Ap, h_Bp, h_hC, nx, ny+my ) ;

    // Check results.
    // NOTE(review): exact float equality between host and device sums —
    // this holds only if the device compiler does not contract a+b into an
    // FMA-involving sequence; a tolerance compare would be more robust.
    int flag = 0;
    for(int i=0;i<(nx);i++){
        for(int j=0;j<(ny+my);j++){
            if(h_hC[i*(ny+my)+j] != h_dCp[i*(ny+my)+j]) flag=1;
        }
    }
    if (flag == 0){
        // total, H2D, kernel, D2H times in seconds
        printf("%.6f %.6f %.6f %.6f\n",(timeStampD-timeStampA),(timeStampB-timeStampA),(timeStampC-timeStampB),(timeStampD-timeStampC));
    }
    else{
        printf("Not eq");
    }

    //free other resourses
    cudaFreeHost(h_Ap);
    cudaFreeHost(h_Bp);
    cudaFreeHost(h_dCp);
    free(h_hC);
    cudaDeviceReset() ;
}
11,710
#include<stdio.h> #include<cuda.h> __global__ void mtrxAdd(float* d_a, float* d_b, float* d_c) { int i = threadIdx.x; int j = blockIdx.x; int n = blockDim.x; float bij = d_b[i*n+j]; float cij = d_c[i*n+j]; d_a[i*n+j]=bij+cij; } int main(int argc, char** argv) { const int ARRAY_N = 8; const int n = ARRAY_N; const int ARRAY_BYTES = ARRAY_N * ARRAY_N * sizeof(float); float h_b[ARRAY_N*ARRAY_N]; float h_c[ARRAY_N*ARRAY_N]; for (int i=0; i < ARRAY_N; i++) { for (int j=0; j < ARRAY_N; j++) { h_b[i*n+j] = float(i); h_c[i*n+j] = float(j); } } float h_a[ARRAY_N*ARRAY_N]; //declare GPU memory pointers float *d_a; float *d_b; float *d_c; //allocate memory on the device cudaMalloc((void**)&d_a,ARRAY_BYTES); cudaMalloc((void**)&d_b,ARRAY_BYTES); cudaMalloc((void**)&d_c,ARRAY_BYTES); //transfer the array to the GPU //destination, source, size, method cudaMemcpy(d_c,h_c,ARRAY_BYTES,cudaMemcpyHostToDevice); cudaMemcpy(d_b,h_b,ARRAY_BYTES,cudaMemcpyHostToDevice); cudaMemcpy(d_a,h_a,ARRAY_BYTES,cudaMemcpyHostToDevice); //launch the kernel mtrxAdd<<<ARRAY_N,ARRAY_N>>>(d_a,d_b,d_c); cudaDeviceSynchronize(); //copy the results back onto the device //destination, source, size, method cudaMemcpy(h_a,d_a,ARRAY_BYTES,cudaMemcpyDeviceToHost); for (int i=0; i<ARRAY_N; i++) { for (int j=0; j<ARRAY_N; j++) { if (h_a[i*n+j] < 10) printf(" "); printf("%.2f ",h_a[i*n+j],h_b[i*n+j],h_c[i*n+j]); } printf("\n"); } //free memory previously allocated on the device cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); }
11,711
/* This program reads a CSV file containing 64 LiDAR data packets and writes
   the scanned points (converted from range/angle data) as Cartesian
   coordinates to an output CSV. */
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>

#define NUM_POINTS 16384

// Converts ranges r[i] plus per-channel altitude/azimuth angles and the
// encoder count into Cartesian points. One thread per point: 16 channels per
// azimuth block, encoder advancing 88 ticks per block modulo 90112.
// point_cloud is packed x1 y1 z1 x2 y2 z2 ...
__global__ void Conversion(float* r, unsigned long int* encoder_count, float* altitude, float* azimuth, float* point_cloud)
{
    int azimuth_block, channel;
    unsigned long int counter;
    float theta, phi;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    azimuth_block = i / 16;
    counter = (encoder_count[0] + azimuth_block * 88) % 90112;
    channel = i % 16;
    theta = (float)(2 * M_PI * (counter / 90112.0 + azimuth[channel] / 360.0));
    phi = (float)(2 * M_PI * altitude[channel] / 360.0);
    // FIX: use the float math functions — cos/sin promoted everything to
    // double only to cast straight back to float.
    point_cloud[0 + 3 * i] = r[i] * cosf(theta) * cosf(phi);  // x
    point_cloud[1 + 3 * i] = -r[i] * sinf(theta) * cosf(phi); // y
    point_cloud[2 + 3 * i] = r[i] * sinf(phi);                // z
}

int main(void)
{
    /////// Block 1: open and parse the Donut and beam_intrinsics files ///////
    int i = 0;
    const int N_LINE = 128; // max characters to read per line
    char line[N_LINE];
    FILE* document;
    document = fopen("Donut_1024x16.csv", "r");
    if (!document) {
        perror("File opening failed");
        return 1; // FIX: failure paths now exit non-zero (original returned 0)
    }
    float* h_r = NULL; // ranges
    size_t bytes_r = NUM_POINTS*sizeof(float);
    h_r = (float*)malloc(bytes_r);
    unsigned long int h_encoder_count = 0; // initial encoder count (then grows by 88 ticks)
    int offset = 0;
    unsigned long int word = 0;
    int channel = 2;
    int azimuth_block = 0;
    int lidar_packet = 0;
    int idx_line; // line index of the next word to read
    int j = 1;    // current line number
    while (fgets(line, N_LINE, document) != NULL)
    {
        // first encoder_count value: two bytes spread over lines 13 and 14
        if (j == 13) h_encoder_count = atoi(line);
        if (j == 14) h_encoder_count = atoi(line) << 8 | h_encoder_count;
        // read ranges from the Donut file (20-bit words over three lines)
        idx_line = 17 + 12 * channel + 788 * azimuth_block + 12608 * lidar_packet;
        if (j == idx_line) word = (unsigned long int) atoi(line);
        if (j == idx_line + 1) word = (unsigned long int) (atoi(line) << 8) | word;
        if (j == idx_line + 2) word = (unsigned long int) ((atoi(line) & 0x0000000F)<<16) | word;
        if (j > (idx_line + 2)) // range complete: advance to the next channel
        {
            h_r[offset] = (float)word;
            offset++;
            channel += 4;
        }
        if (channel >= 64) // channels of this block done: next azimuth block
        {
            channel = 2;
            azimuth_block++;
        }
        if (azimuth_block >= 16) // azimuth blocks done: next lidar packet
        {
            azimuth_block = 0;
            lidar_packet++;
        }
        if (lidar_packet >= 64) break; // all lidar packets read
        j++;
    }
    fclose(document);
    //printf("%ld\n",h_encoder_count);
    //for(i=0;i<100;i++) printf("%.3f\n",h_r[i]);

    // read the beam_intrinsics file
    document = fopen("beam_intrinsics.csv", "r");
    if (!document) {
        perror("File opening failed");
        return 1; // FIX: non-zero on failure
    }
    float *h_altitude = NULL;
    float *h_azimuth = NULL;
    size_t bytes_angles = 16 * sizeof(float); // 16 channels
    h_altitude = (float*)malloc(bytes_angles);
    h_azimuth = (float*)malloc(bytes_angles);
    j = 1;
    while (fgets(line, N_LINE, document) != NULL)
    {
        // altitude angles (every 4th line within 2..65)
        if (j == 2) offset = 0;
        if (j >= 2 && j <= 65) {
            if (j % 4 == 0) {
                h_altitude[offset] = (float)atof(line);
                offset++;
            }
        }
        // azimuth angles (every 4th line within 68..131)
        if (j == 68) offset = 0;
        if (j >= 68 && j <= 131) {
            if ((j - 66) % 4 == 0) {
                h_azimuth[offset] = (float)atof(line);
                offset++;
            }
        }
        j++;
    }
    fclose(document);
    //for(i=0;i<16;i++) printf("%.3f\n",h_altitude[i]);
    //for(i=0;i<16;i++) printf("%.3f\n",h_azimuth[i]);
    /////// End of Block 1 ///////

    /////// Block 2: conversion to Cartesian coordinates ///////
    float *h_point_cloud = NULL;
    h_point_cloud = (float*)malloc(3 * bytes_r);

    // device buffers
    float *d_point_cloud = NULL; // packed x1 y1 z1 x2 y2 z2 ...
    float *d_r = NULL;
    float *d_azimuth = NULL;
    float *d_altitude = NULL;
    unsigned long int* d_encoder_count;
    cudaMalloc(&d_point_cloud, 3*bytes_r);
    cudaMalloc(&d_r, bytes_r);
    cudaMalloc(&d_azimuth, bytes_angles);
    cudaMalloc(&d_altitude, bytes_angles);
    cudaMalloc(&d_encoder_count, sizeof(unsigned long int));

    // move input data to the GPU
    cudaMemcpy(d_r,h_r,bytes_r,cudaMemcpyHostToDevice);
    cudaMemcpy(d_azimuth,h_azimuth,bytes_angles,cudaMemcpyHostToDevice);
    cudaMemcpy(d_altitude,h_altitude,bytes_angles,cudaMemcpyHostToDevice);
    cudaMemcpy(d_encoder_count,&h_encoder_count,sizeof(unsigned long int),cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int BlockSize = NUM_POINTS/16; // 1024 threads: the per-block maximum
    int GridSize = 16;

    // launch the kernel and time it with events
    cudaEventRecord(start);
    Conversion<<<GridSize,BlockSize>>>(d_r, d_encoder_count, d_altitude, d_azimuth, d_point_cloud);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Kernel's elapsed time: %.3f ms\n",milliseconds);

    // move the output back to the CPU
    cudaMemcpy(h_point_cloud, d_point_cloud, 3 * bytes_r, cudaMemcpyDeviceToHost);
    /////// End of Block 2 ///////

    /////// Block 3: write the points to Output_file.csv ///////
    document = fopen("Output_file.csv", "w");
    if (!document) {
        perror("File opening failed");
        return 1; // FIX: non-zero on failure
    }
    for (i = 0; i < NUM_POINTS; i++)
    {
        for (j = 0; j < 2; j++) fprintf(document, "%.4f, ", h_point_cloud[j + i * 3]);
        fprintf(document, "%.4f\n ", h_point_cloud[j + i * 3]); // j == 2 here: the z component
    }
    fclose(document);
    printf("Success!\n");
    /////// End of Block 3 ///////

    // free memory
    // FIX: removed free(document) — a FILE* from fopen must only be closed
    // with fclose; freeing it (after it was already fclose'd) was undefined
    // behavior.
    free(h_r), free(h_altitude), free(h_azimuth), free(h_point_cloud);
    cudaFree(d_r), cudaFree(d_altitude), cudaFree(d_azimuth), cudaFree(d_point_cloud), cudaFree(d_encoder_count);
    return 0; // FIX: success exits with 0 (original returned 1)
}
11,712
// device code // author: Pan Yang // date : 2015-7-1 #include <stdio.h> __global__ void mykernel(void) { } int main(void) { mykernel<<<1,1>>>(); printf("cu: Hello World!\n"); return 0; }
11,713
#include <iostream> #include <cstring> #include <algorithm> using namespace std; int a[35]; //【方法1】排序後取中間那一個。 int median(int i){ int b[3] = {a[i-1], a[i-2], a[i-3]}; sort(b, b+3); return b[1]; } int main() { ios_base::sync_with_stdio(false); cin.tie(0); int n; while (cin >> n){ for (int i=0; i<n; i++){ cin >> a[i]; } for (int i=3; i<n; i++){ if (abs(a[i] - a[i-1]) < 5) continue; a[i] = median(i); } for (int i=0; i<n; i++) cout << a[i] << ' '; cout << '\n'; } return 0; }
11,714
#include "includes.h" __global__ void blur(uchar3 *input, uchar3 *output,int width, int height) { int matrix[7][7] = {{0,0,1,2,1,0,0},{0,3,13,22,13,3,0},{1,3,59,97,59,13,1},{2,22,97,159,97,22,2},{1,3,59,97,59,3,1},{0,3,13,22,13,3,0},{0,0,1,2,1,0,0}}; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //if ((gridDim.x * gridDim.y) < width * height){ int tid = y*width + x; int outputTemp = 0; int sommeCoef = 0; if (x<width){ if (y<height){ if (x>3 && x<width-3 && y>3 && y<height-3){ for (int i=0; i<7; i++){ for (int j=0; j<7; j++){ outputTemp += input[(y-3+i)*width+(x-3+j)].x*matrix[j][i]; sommeCoef += matrix[j][i]; } output[tid].x = outputTemp / sommeCoef; output[tid].z = output[tid].y = output[tid].x; } } } } // } }
11,715
#include "includes.h" /** * Quantum Lattice Boltzmann * (c) 2015 Fabian Thüring, ETH Zurich * * This file contains all the CUDA kernels and function that make use of the * CUDA runtime API */ // Local includes // ==== CONSTANTS ==== __constant__ unsigned int d_L; __constant__ float d_dx; __constant__ float d_dt; __constant__ float d_mass; __constant__ float d_g; __constant__ unsigned int d_t; __constant__ float d_scaling; __constant__ int d_current_scene; // ==== INITIALIZATION ==== __global__ void kernel_calculate_normal_V(float3* vbo_ptr, float* d_ptr) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; if(i < d_L && j < d_L) { int ik = (i + 1) % d_L; int jk = (d_L - 1 + j) % d_L; // x float x2 = d_scaling * fabsf( d_ptr[i*d_L +j] ); // a float a1 = d_dx; float a2 = d_scaling * fabsf( d_ptr[ik*d_L +j] ) - x2; // b float b2 = d_scaling * fabsf( d_ptr[i*d_L +jk] ) - x2; float b3 = -d_dx; // n = a x b float3 n; n.x = a2*b3; n.y = -a1*b3; n.z = a1*b2; // normalize float norm = sqrtf(n.x*n.x + n.y*n.y + n.z*n.z); vbo_ptr[d_L*i + j].x = n.x/norm; vbo_ptr[d_L*i + j].y = n.y/norm; vbo_ptr[d_L*i + j].z = n.z/norm; } }
11,716
#include<cstdio>
#include "vector_types.h"
#define pi 3.14159265359
#define pos(x,y) ((x) + (y)*w)

extern "C" {

// Generic 2D convolution of a single-channel image with an integer kernel.
// Assumes convo_w, convo_h <= 32 (size of the shared-memory staging array).
// Near the borders, only in-bounds taps contribute and the weight sum is
// reduced accordingly.
__global__ void convolution(unsigned char* src, int w, int h, char* convo, int convo_w, int convo_h, unsigned char* dst){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    __shared__ int shr_convo[32][32];
    // Thread (0,0) of each block stages the kernel into shared memory.
    if(threadIdx.x == 0 && threadIdx.y == 0){
        int p = 0;
        for(int i = 0; i < convo_w; i++){
            for(int j = 0; j < convo_h; j++){
                shr_convo[i][j] = convo[p++];
            }
        }
    }
    // fix: every thread of the block must reach this barrier, so the
    // out-of-image early return is moved to AFTER __syncthreads()
    // (the old code returned first — a divergent-barrier hazard).
    __syncthreads();
    if (x >= w || y >= h) return;

    int computed_pixel = 0;
    int computed_weigth = 0;
    int radius_x = convo_w / 2, radius_y = convo_h / 2;
    int s_x = x - radius_x, s_y = y - radius_y;
    for(int i = 0; i < convo_w; i++){
        for(int j = 0; j < convo_h; j++){
            if(s_x >= 0 && s_x < w && s_y >= 0 && s_y < h){
                computed_pixel += shr_convo[i][j] * src[s_x + s_y * w];
                computed_weigth += shr_convo[i][j];
            }
            s_y++;
        }
        s_y -= convo_h;
        s_x++;
    }
    int val = computed_weigth > 0 ? computed_pixel / computed_weigth : computed_pixel;
    dst[pos(x,y)] = val;
}

// Sobel operator on luminance image l: writes gradient magnitude (clamped to
// 255) and gradient direction in degrees [0, 180).
// NOTE(review): the guard skips the last two columns/rows (x >= w-2), one more
// than strictly needed — kept as in the original to preserve output layout.
__global__ void sobel(unsigned char* l, unsigned char* magnitude, unsigned char* direction, int w, int h){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w - 2 || y >= h - 2) return;
    if (x < 1 || y < 1) return;
    int dx = 0
        + l[pos(x-1, y-1)] - l[pos(x+1, y-1)]
        + 2*l[pos(x-1, y)] - 2*l[pos(x+1, y)]
        + l[pos(x-1, y+1)] - l[pos(x+1, y+1)];
    int dy = 0
        + l[pos(x-1, y-1)] + 2*l[pos(x, y-1)] + l[pos(x+1, y-1)]
        - l[pos(x-1, y+1)] - 2*l[pos(x, y+1)] - l[pos(x+1, y+1)];
    int mag = sqrt((double)dx*dx + dy*dy);
    if (mag > 255) mag = 255;   // fix: avoid unsigned char wrap-around
    // fix: the angle was stored in an `int` (truncating the radian value to
    // 0 or +/-1) and atan(dy/dx) divided by zero on vertical edges; compute
    // in double and special-case dx == 0 as a 90-degree gradient.
    double dir = (dx == 0) ? pi / 2.0 : atan((double)dy / dx);
    magnitude[x + y*w] = mag;
    int degree = dir * 180.0 / pi;
    if (degree < 0) degree += 180;
    direction[x + y*w] = degree;
}

// Non-maximum suppression: keep a magnitude only if it is the largest along
// its gradient direction (quantized to 4 sectors); otherwise zero it.
__global__ void suppression(unsigned char* magnitude, unsigned char* direction, unsigned char* result_magnitude, int w, int h){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w - 2 || y >= h - 2) return;
    if (x < 1 || y < 1) return;
    int degree = direction[x + y * w];
    int mag = magnitude[x + y * w];
    bool greatest = false;
    if(degree < 22 || degree > 180 - 22){ // horizontal gradient
        if(mag > magnitude[pos(x-1, y)] && mag > magnitude[pos(x+1, y)]) greatest = true;
    } else if (degree < 45+22){ // diagonal (up-right)
        if(mag > magnitude[pos(x+1, y+1)] && mag > magnitude[pos(x-1, y-1)]) greatest = true;
    } else if (degree < 90 + 22) { // vertical gradient
        if(mag > magnitude[pos(x,y+1)] && mag > magnitude[pos(x, y-1)]) greatest = true;
    } else { // diagonal (up-left)
        if(mag > magnitude[pos(x-1,y+1)] && mag > magnitude[pos(x+1, y-1)]) greatest = true;
    }
    if(!greatest){
        mag = 0;
    }
    result_magnitude[x + y * w] = mag;
}

// Double threshold: 255 = strong edge, 127 = weak edge, 0 = suppressed.
__global__ void threshold(unsigned char* magnitude, unsigned char* label, unsigned char low, unsigned char high, int w, int h){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) return;
    int value = magnitude[pos(x,y)];
    if(value >= high) label[pos(x,y)] = 255;
    else if (value >= low) label[pos(x,y)] = 127;
    else label[pos(x,y)] = 0;
}

// Zero every magnitude whose mask value is not 255.
__global__ void mask(unsigned char* magnitude, unsigned char* mask, int w, int h){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) return;
    int value = mask[pos(x,y)];
    if(value != 255) magnitude[pos(x,y)] = 0;
}

// Out-of-place transpose: out is h x w when in is w x h.
__global__ void transpose(unsigned char* in, unsigned char* out, int w, int h){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) return;
    out[y + x * h] = in[pos(x,y)];
}

// Hysteresis along each column (one thread per column): a weak edge (127)
// adjacent to a strong edge (255) is promoted, sweeping down then up.
__global__ void cc(unsigned char* label, int w, int h){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= w) return;
    for(int y = 1; y < h; y++){
        if(label[pos(x, y-1)] == 255 && label[pos(x,y)] == 127) label[pos(x,y)] = 255;
    }
    for(int y = h - 1; y > 0; y--){
        if(label[pos(x, y)] == 255 && label[pos(x, y-1)] == 127) label[pos(x, y-1)] = 255;
    }
}

// Luma conversion with BT.601 weights; each term is truncated to int before
// summing (kept from the original — the result may differ by up to 2 from a
// rounded weighted sum).
__global__ void to_gray(unsigned char* red, unsigned char* green, unsigned char* blue, unsigned char* dst, int w, int h){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) return;
    int r = 0.2989 * red[pos(x,y)];
    int g = 0.5870 * green[pos(x,y)];
    int b = 0.1140 * blue[pos(x,y)];
    dst[x + y * w] = r+g+b;
}

///// legacy
// Euclidean combination of two gradient images.
__global__ void get_magnitude(unsigned char* l1, unsigned char* l2, unsigned char* dst, int w, int h){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) return;
    int v1 = l1[x + y * w];
    int v2 = l2[x + y * w];
    int val = sqrt((float)v1 * v1 + v2 * v2);
    dst[x + y * w] = val;
}

}
11,717
// Tiled matrix multiply: a = b * c, where each thread accumulates TWO output
// elements of column j (rows i and i+bx) to increase ILP. The grid is laid
// out as (ceil(n / (2*blockDim.x)), m); a tile of column j of c is staged in
// shared memory per iteration.
// Preconditions (assumed, not checked — TODO confirm against the host code):
//   - p is a multiple of blockDim.x (the ks loop reads full tiles),
//   - blockDim.x <= 512 (size of the shared tile),
//   - n and m are not used inside the kernel; bounds come from the launch.
extern "C" __global__ void mmkernel( float* a, float* b, float* c, int pitch_a, int pitch_b, int pitch_c, int n, int m, int p )
{
    int tx = threadIdx.x;
    int bx = blockDim.x;
    // This thread's first output row; the second is i + bx.
    int i = blockIdx.x*bx*2 + threadIdx.x;
    int j = blockIdx.y;              // output column handled by this block
    __shared__ float cc[512];        // one tile of column j of c
    float sum0 = 0.0, sum1=0.0;
    for( int ks = 0; ks < p; ks += bx ){
        // Cooperative load of c[ks .. ks+bx) of column j into shared memory.
        cc[tx] = c[(ks+tx) * pitch_c + j];
        __syncthreads();             // tile fully loaded before use
        for( int k = ks; k < ks+bx; ++k ) {
            sum0 += b[i+pitch_b*k] * cc[k-ks];
            sum1 += b[i+bx+pitch_b*k] * cc[k-ks];
        }
        __syncthreads();             // tile fully consumed before overwrite
    }
    a[j+pitch_a*i] = sum0;
    a[j+pitch_a*(i+bx)] = sum1;
}
11,718
#include <stdio.h>
#include <stdlib.h>

#define TILE_SIZE 8

// Sparse matrix-vector multiply for a CSR matrix: d_y += A * d_X.
// Intended scheme: one warp per row, lanes striding over the row's nonzeros,
// then a shared-memory tree reduction with lane 0 writing the row result.
//
// NOTE(review): with TILE_SIZE == 8 (and therefore blockDim.x == 8, given the
// thid formula below) this kernel is inconsistent with its own warp logic:
//   - the reduction reads dot[threadIdx.x + s] with s up to 16, which is out
//     of bounds for a shared array of TILE_SIZE == 8 entries;
//   - warp = thid/32 maps four consecutive 8-thread blocks onto the same row;
//   - there is no __syncthreads()/__syncwarp() between reduction steps, so
//     the tree reduction races even within a full warp on Volta+.
// Fixing this requires choosing TILE_SIZE == 32 (or a multiple) together with
// the matching host launch configuration, which is outside this file's view.
__global__ void csr_multiply(int* d_row_size ,int* d_sparse, int* d_cols, int* d_vals, int* d_X, int* d_y){
    __shared__ int dot[TILE_SIZE];              // per-thread partial products
    int thid = TILE_SIZE * blockIdx.x + threadIdx.x;
    int warp = thid/32;                         // row index (one warp per row)
    int stride = thid % 32;                     // lane within the warp
    int row = warp;
    if (row < *d_row_size) {
        dot[threadIdx.x]=0;
        // CSR row extent: [d_sparse[row], d_sparse[row+1]).
        int row_start = d_sparse[row];
        int row_end = d_sparse[row+1];
        // Each lane accumulates every 32nd nonzero of the row.
        for(int elem = row_start+stride; elem < row_end ; elem +=32){
            dot[threadIdx.x]+= d_vals[elem] * d_X[d_cols[elem]];
        }
        // Tree reduction over the warp's partials (see NOTE above re: races
        // and out-of-bounds reads at the current TILE_SIZE).
        for ( int s = 32 >> 1 ; s >= 1; s >>=1) {
            if(stride < s ){dot[threadIdx.x] += dot[threadIdx.x +s];}
        }
        if(stride==0){
            d_y[row]+=dot[threadIdx.x];
        }
    }
}
11,719
#include "header.cuh"
#include <stdio.h>

// The simulation grid is (N+2) x (N+2): an N x N live area surrounded by one
// "virtual" (ghost) row/column on each side used for periodic wrap-around.
// N, BLSIZE and CELLS_PER_THREAD come from header.cuh.

// Copies the top/bottom live rows into the opposite ghost rows.
__global__ void updateVirtualRows(long* currentTable)
{
    long index_x = threadIdx.x + blockIdx.x * blockDim.x;
    if(index_x < (N+1)) //only threads required do the job
    {
        //copy bottom line to top
        currentTable[(N+1)*(N+2)+index_x] = currentTable[N+2+index_x];
        //copy top line to bottom
        currentTable[index_x] = currentTable[N*(N+2)+index_x];
    }
}

// Copies the left/right live columns into the opposite ghost columns.
__global__ void updateVirtualColumns(long* currentTable)
{
    long index_y = threadIdx.x + blockIdx.x * blockDim.x;
    if(index_y < (N+1)) //only threads required do the job
    {
        //copy left line to right
        currentTable[index_y*(N+2)+N+1] = currentTable[index_y*(N+2)+1];
        //copy right line to left
        currentTable[index_y*(N+2)] = currentTable[index_y*(N+2)+N];
    }
}

// Fills the four ghost corners from the diagonally opposite live corners.
// Intended as a single-thread launch (no thread guard).
__global__ void updateVirtualCorners(long* currentTable)
{
    currentTable[0] = currentTable[N*(N+2)+N];
    currentTable[(N+1)*(N+2)+N+1] = currentTable[N+3];
    currentTable[N+1] = currentTable[N*(N+2)+1];
    currentTable[(N+1)*(N+2)] = currentTable[2*N+2];
}

// Computes one Game-of-Life generation. Each thread owns a
// CELLS_PER_THREAD x CELLS_PER_THREAD patch; the block first stages its
// patch plus a one-cell halo into shared memory, then applies the rules.
__global__ void calculateNextGen(long* currentTable, long* nextTable)
{
    size_t i,j;
    // offset1/offset2 are 1 only for the last thread row/column of the block
    // (threadIdx / (BLSIZE-1)); they shift that thread's copy window so the
    // trailing halo edge gets filled. NOTE(review): this relies on
    // blockDim == BLSIZE in both dimensions — confirm against the launcher.
    size_t offset1 = threadIdx.y / (BLSIZE - 1); //index to traverse the localTable
    size_t offset2 = threadIdx.x / (BLSIZE - 1); //index to traverse the localTable
    size_t x = (blockIdx.y * blockDim.y + threadIdx.y) * CELLS_PER_THREAD ; //index to the globalTable
    size_t y = (blockIdx.x * blockDim.x + threadIdx.x) * CELLS_PER_THREAD ; //index to the globalTable
    size_t Y = threadIdx.y * CELLS_PER_THREAD; //index to the localTable
    size_t X = threadIdx.x * CELLS_PER_THREAD; //index to the localTable

    //define the grid of this block
    __shared__ long local_table[BLSIZE*CELLS_PER_THREAD+2][BLSIZE*CELLS_PER_THREAD+2];

    //Copy the suitable part of the globalTable to the localTable
    //The size of the localTable is BLSIZE*CELLS_PER_THREAD+2 x BLSIZE*CELLS_PER_THREAD+2 (+2 for the virtual neighbours)
    for ( i = 0; i < CELLS_PER_THREAD + 1; i++){
        size_t in2 = (x + offset1 + i ) * (N + 2);//The global grid has a N+2 edge
        for ( j = 0; j < CELLS_PER_THREAD + 1; j++) {
            size_t in1 = y + j + offset2;
            local_table[Y + offset1 + i][X + offset2 + j] = currentTable[in2 + in1];
        }
    }

    //All threads should have reached this point in order to continue
    //Syncthreads() is used to guarantee that the whole localTable has been filled up
    __syncthreads();

    //Calculation of the next generation status
    int ii,jj;
    for (i= 1; i < CELLS_PER_THREAD + 1; i++) {
        ii = Y + i;
        size_t new_index_y = (x + i)*(N + 2);
        //each thread computes CELLS_PER_THREAD elements of the next_generation_table
        for (j = 1; j < CELLS_PER_THREAD + 1; j++) {
            jj = X + j;
            // Sum of the 8 neighbours in the shared halo copy.
            int livingNeighbors = local_table[ii - 1][jj - 1] + local_table[ii - 1][jj]
                + local_table[ii - 1][jj + 1] + local_table[ii][jj - 1]
                + local_table[ii][jj + 1] + local_table[ii + 1][jj - 1]
                + local_table[ii + 1][jj] + local_table[ii + 1][jj + 1];
            size_t new_index_x = y + j;
            if (x < N + 1 && y < N + 1) //only threads required do the job
            {
                // Standard B3/S23 rules: born with 3 neighbours, survives
                // with 2 or 3.
                if(livingNeighbors == 3 || (livingNeighbors == 2 && local_table[ii][jj] == 1))
                    nextTable[new_index_y + new_index_x] = 1;
                else
                    nextTable[new_index_y + new_index_x] = 0;
            }
        }
    }
}
11,720
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#include "PerlinNoise.cuh"

// Integer hash (Bob-Jenkins-style mix) used to derive pseudo-random gradients.
__device__ unsigned int hash1(unsigned int a)
{
    a = (a+0x7ed55d16) + (a<<12);
    a = (a^0xc761c23c) ^ (a>>19);
    a = (a+0x165667b1) + (a<<5);
    a = (a+0xd3a2646c) ^ (a<<9);
    a = (a+0xfd7046c5) + (a<<3);
    a = (a^0xb55a4f09) ^ (a>>16);
    return a;
}

// Alternative integer hash (Wang hash); not used below but kept available.
__device__ unsigned int hash2(unsigned int a)
{
    a = (a ^ 61) ^ (a >> 16);
    a = a + (a << 3);
    a = a ^ (a >> 4);
    a = a * 0x27d4eb2d;
    a = a ^ (a >> 15);
    return a;
}

// Combines two cell coordinates into a single hash input
// (a variant of the Szudzik/elegant pairing function).
__device__ unsigned int hashPaire(unsigned int a, unsigned int b)
{
    return (a >= b)? a*a + a + b : a+ b*b;
}

// 2D dot product.
__device__ float dot(float2 v1, float2 v2)
{
    return v1.x*v2.x + v1.y*v2.y;
}

// Euclidean norm of a 2D vector.
__device__ float norm2(float2 v)
{
    return sqrt(dot(v, v));
}

// Euclidean distance between two 2D points.
__device__ float distance2(float2 v1, float2 v2)
{
    return norm2(make_float2(v1.x - v2.x, v1.y - v2.y));
}

// Returns v scaled to unit length; near-zero vectors are returned unchanged
// to avoid dividing by ~0.
__device__ float2 normalize(float2 v)
{
    float n = norm2(v);
    if (n <= 1e-7f)
        return v;
    v.x /= n;
    v.y /= n;
    return v;
}

// Deterministic pseudo-random unit gradient for a lattice cell corner.
__device__ float2 getGradient(unsigned int xCell, unsigned int yCell)
{
    unsigned int hashX = hash1(hashPaire(xCell, yCell));
    unsigned int hashY = hash1(hashPaire(yCell, hashX%(1 << 16)));
    // Map hashes to roughly [-512, 512) before normalizing.
    float x = float(hashX % (1 << 10)) - (1 << 9);
    float y = float(hashY % (1 << 10)) - (1 << 9);
    return normalize(make_float2(x, y));
}

// Smootherstep interpolation between a and b (Perlin's 6t^5-15t^4+10t^3 fade).
__device__ float interpolate(float t, float a, float b)
{
    t = t*t*t*(6*t*t-15*t+10);
    return a + t*(b-a);
}

// One octave of 2D Perlin-style noise, accumulated into d_z at each node.
// One thread per node; nodes outside [0,width)x[0,height) are clamped inward
// by half a lattice square before sampling.
__global__ void CUDA_noise( float width, float height, unsigned int nbNodes, float* d_x, float* d_y, float* d_z, float frequency, float amplitude)
{
    const unsigned int node = blockIdx.x*blockDim.x + threadIdx.x;
    if (node >= nbNodes)
        return ;
    const float squareSize = height/frequency;
    float x = d_x[node];
    if (x < 0.f) x = squareSize/2;
    if (x >= width) x = width - squareSize/2;
    float y = d_y[node];
    if (y < 0.f) y = squareSize / 2;
    if (y >= height) y = height - squareSize/2;
    unsigned int xCell = x/squareSize;
    unsigned int yCell = y/squareSize;
    // The four corners of the lattice cell containing (x, y).
    const unsigned int directions[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    float values[4];
    for (unsigned int dir = 0; dir < 4; ++dir) {
        unsigned int xCorner = xCell+directions[dir][0];
        unsigned int yCorner = yCell+directions[dir][1];
        float2 gradient = getGradient(xCorner, yCorner);
        float2 pos = make_float2(xCorner*squareSize, yCorner*squareSize);
        float2 arrow = make_float2(pos.x - x, pos.y - y);
        // NOTE(review): classic Perlin uses the unnormalized offset here;
        // normalizing it changes the noise profile — presumably intentional.
        values[dir] = dot(normalize(arrow), gradient);
    }
    // Bilinear blend of the four corner values with smootherstep weights.
    float xReal = (x - xCell*squareSize) / squareSize;
    float y1 = interpolate(xReal, values[0], values[2]);
    float y2 = interpolate(xReal, values[1], values[3]);
    float yReal = (y - yCell*squareSize) / squareSize;
    float z = interpolate(yReal, y1, y2);
    d_z[node] += amplitude*z;
}

// Host driver: sums nbOctaves octaves of noise (frequency x2.5, amplitude
// x persistence per octave) and normalizes the result into [0, 1].
// h_z receives one value per (h_x, h_y) node.
__host__ void CUDA_PerlinNoise( float width, float height, unsigned int nbNodes, float* h_x, float* h_y, float* h_z, float fundamental, unsigned int nbOctaves, float persistence)
{
    const unsigned int nbBytes = sizeof(float)*nbNodes;
    // NOTE(review): CUDA API return codes are not checked anywhere here.
    float* d_x;
    cudaMalloc(&d_x, nbBytes);
    cudaMemcpy(d_x, h_x, nbBytes, cudaMemcpyHostToDevice);
    float* d_y;
    cudaMalloc(&d_y, nbBytes);
    cudaMemcpy(d_y, h_y, nbBytes, cudaMemcpyHostToDevice);
    float* d_z;
    cudaMalloc(&d_z, nbBytes);
    cudaMemset(d_z, 0, nbBytes);

    float frequency = fundamental;
    float amplitude = 1.;
    float amplitudeMax = 0.;
    for (unsigned int octave = 0; octave < nbOctaves; ++octave) {
        amplitudeMax += amplitude;
        const unsigned int blockSize = 128;
        const dim3 gridSize((nbNodes+blockSize-1)/blockSize);
        CUDA_noise<<<gridSize, blockSize>>>(width, height, nbNodes, d_x, d_y, d_z, frequency, amplitude);
        frequency *= 2.5;
        amplitude *= persistence;
    }
    cudaMemcpy(h_z, d_z, nbBytes, cudaMemcpyDeviceToHost);
    // Rescale the accumulated noise from [-amplitudeMax, amplitudeMax]
    // to [0, 1].
    for (unsigned int node = 0; node < nbNodes; ++node) {
        h_z[node] /= amplitudeMax;
        h_z[node] = (h_z[node]+1)/2;
    }
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
}
11,721
#include "includes.h"

// K-means centroid update. One block per cluster k (blockIdx.x); each thread
// scans a contiguous slice of the image's pixel-to-cluster assignments,
// accumulates B/G/R sums and a count for cluster k, then a block-wide tree
// reduction produces the new centroid. Bdata/Gdata/Rdata/nValue are global
// scratch arrays with one slot per thread (indexed by global tid).
// Assumes blockDim.x == n_threads — TODO confirm against the launcher.
__global__ void update_cluster(int *cluster, float *centroid, float *B_c, float *G_c, float *R_c, int size_image, int n_threads, int K, float *Bdata, float *Gdata, float *Rdata, float *nValue)
{
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int id = threadIdx.x;
    int k = blockIdx.x;                      // cluster handled by this block
    int size_per_thread = int(size_image/n_threads);
    int start = id*size_per_thread;
    int end = start + size_per_thread;
    float count = 0;
    float B = 0;
    float G = 0;
    float R = 0;
    nValue[tid] = 0;
    Bdata[tid] = 0;
    Gdata[tid] = 0;
    Rdata[tid] = 0;
    // NOTE(review): this guard compares the thread index against the pixel
    // count — it looks like it should be `id >= n_threads`. Also, returning
    // here before the __syncthreads() below is a divergent-barrier hazard
    // whenever it actually triggers.
    if (id >=size_image){
        return;
    }
    // Last thread also takes the remainder pixels.
    if (id==n_threads-1) {
        start = (n_threads-1)*size_per_thread;
        end = size_image;
    }
    // Accumulate channel sums over this thread's slice for cluster k.
    for(int j = start; j < end; j++) {
        if(cluster[j] == k) {
            B = B + (B_c[j]);
            G = G + (G_c[j]);
            R = R + (R_c[j]);
            count = count + 1;
        }
    }
    nValue[tid] = count;
    Bdata[tid] = B;
    Gdata[tid] = G;
    Rdata[tid] = R;
    __syncthreads();
    // Interleaved tree reduction over this block's scratch slots.
    // NOTE(review): the bound check `tid+s<=(k+1)*n_threads` allows reading
    // one slot past this block's range (should presumably be `<`), and the
    // scheme assumes n_threads is a power of two.
    for(unsigned int s=1; s < blockDim.x; s *= 2) {
        if(tid % (2*s) == 0 && tid+s<=(k+1)*n_threads) {
            nValue[tid] += nValue[tid + s];
            Bdata[tid] += Bdata[tid + s];
            Gdata[tid] += Gdata[tid + s];
            Rdata[tid] += Rdata[tid + s];
        }
        __syncthreads();
    }
    // Thread 0 writes the new centroid (B, G, R) if the cluster is non-empty.
    if(id == 0) {
        //printf("Block: %d. nValue: %f.\n", k, nValue[tid]);
        if (nValue[tid] != 0) {
            centroid[k*3 + 0] = Bdata[tid] / nValue[tid];
            centroid[k*3 + 1] = Gdata[tid] / nValue[tid];
            centroid[k*3 + 2] = Rdata[tid] / nValue[tid];
        }
    }
}
11,722
#include "includes.h"

// Builds one column of a block-diagonal Hessian by finite differences:
// H[row, dof] = (forces1 - forces2) / (blockDelta * sqrt(m_i * m_j))
// (mass-weighted central/one-sided difference — which one depends on how the
// force arrays were produced by the caller, not visible here).
// One thread per block index; `setnum` selects which of the 3 degrees of
// freedom of the block's first atom this launch fills.
__global__ void makeBlockHessian( float *h, float *forces1, float *forces2, float *mass, float blockDelta, int *blocks, int *blocksizes, int numblocks, int *hessiannums, int *hessiansizes, int setnum, int N )
{
    int blockNum = blockIdx.x * blockDim.x + threadIdx.x;
    // The dof/atom this launch perturbs within this block.
    int dof = 3 * blocks[blockNum] + setnum;
    int atom = dof / 3;
    if( atom >= N || ( blockNum != numblocks - 1 && atom >= blocks[blockNum + 1] ) ) {
        return; // Out of bounds
    }
    // This block covers dofs [start_dof, end_dof); the last block runs to 3N.
    int start_dof = 3 * blocks[blockNum];
    int end_dof;
    if( blockNum == numblocks - 1 ) {
        end_dof = 3 * N;
    } else {
        end_dof = 3 * blocks[blockNum + 1];
    }
    /* I also would like to parallelize this at some point as well */
    for( int k = start_dof; k < end_dof; k++ ) {
        // Mass weighting: 1 / (delta * sqrt(m_atom * m_k)).
        // NOTE(review): 1.0 and sqrt() are double ops in a float kernel;
        // 1.0f/sqrtf would avoid the double-precision path.
        float blockScale = 1.0 / ( blockDelta * sqrt( mass[atom] * mass[k / 3] ) );
        //h[startspot+i] = (forces1[k] - forces2[k]) * blockScale;
        // Column `dof - start_dof` of this block's dense sub-Hessian, stored
        // at offset hessiannums[blockNum] with row stride 3*blocksizes[blockNum].
        h[hessiannums[blockNum] + ( k - start_dof ) * ( 3 * blocksizes[blockNum] ) + ( dof - start_dof )] = ( forces1[k] - forces2[k] ) * blockScale;
    }
}
11,723
#include "includes.h"

// Flattened global thread index for a 2D grid of 2D blocks.
__device__ int getGlobalIdx_2D_2D()
{
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
    return threadId;
}

// Hough transform for lines: every near-white pixel votes, for each angle
// t in [0, 180), into the accumulator bin (rho + hough_h, theta).
// dev_accu rows are rho bins of width 180 (one column per degree).
// DEG2RAD is expected from includes.h — TODO confirm its definition.
__global__ void CudaTransform(unsigned char* dev_img, unsigned int *dev_accu, int w, int h){
    //calculate index which this thread has to process
    unsigned int index = getGlobalIdx_2D_2D();
    //check index is in image bounds
    if(index < (w*h)){
        //calculate params: hough_h is half the diagonal, so rho + hough_h >= 0
        float hough_h = ((sqrt(2.0) * (float)(h>w?h:w)) / 2.0);
        float center_x = w/2;
        float center_y = h/2;
        //pixel coordinates for this flat index
        int x = index % w;
        int y = index / w;
        if( dev_img[index] > 250 ){ //if the pixel is white (grayscale value > 250)
            for(int t=0;t<180;t++){ //sweep the parameter space from 0 to 180 degrees (polar form)
                // rho of the line through (x, y) at angle t, measured from the image center
                float r = ( ((float)x - center_x) * cos((float)t * DEG2RAD)) + (((float)y - center_y) * sin((float)t * DEG2RAD));
                //dev_accu[ (int)((round(r + hough_h) * 180.0)) + t]++;
                // atomic: many pixels vote into the same accumulator bin
                atomicAdd(&(dev_accu[ (int)((round(r + hough_h) * 180.0)) + t]), 1);
            }
        }
    }
}
11,724
// Replicate-pad an aw x ah image A into a (aw+kw-1) x (ah+kh-1) buffer C in
// preparation for a kw x kh convolution: the interior is copied, edges are
// replicated from the nearest border row/column, and corners from the nearest
// corner pixel. One thread per source pixel; border threads additionally fill
// the padding cells their position maps to.
// NOTE(review): the corner guards use <= while the edge guards use < on the
// same boundaries, so some threads satisfy a corner case and skip the edge
// case via else-if — confirm the intended overlap behavior with the caller.
__global__ void padding(const float *A,int kw,int kh,int cusiz,float *C){
    // cusiz is used as the block stride in place of blockDim — presumably the
    // launch uses blocks of cusiz x cusiz threads; TODO confirm.
    int tx = threadIdx.x+blockIdx.x*cusiz;
    int ty = threadIdx.y+blockIdx.y*cusiz;
    int aw = blockDim.x*gridDim.x;   // source width (grid covers A exactly)
    int ah = blockDim.y*gridDim.y;   // source height
    int cw = aw+kw-1;                // padded width
    int ch = ah+kh-1;                // padded height
    int pw = (kw-1)/2;               // horizontal padding radius
    int ph = (kh-1)/2;               // vertical padding radius
    int a_idx = tx + ty*aw;
    int c_idx = (tx+pw) + (ty+ph)*cw;
    // Copy the interior pixel to its shifted position.
    C[c_idx]=A[a_idx];
    // top-left corner
    if ( tx<=pw && ty<=ph ){
        C[tx + ty*cw] = A[0 + 0*aw];
    }
    // top-right corner
    else if ( (aw-pw)<=tx && ty<=ph ){
        C[(2*pw+tx) + ty*cw] = A[(aw-1) + 0*aw];
    }
    // bottom-left corner
    else if ( tx<=pw && (ah-ph)<=ty ){
        C[tx + (2*ph+ty)*cw] = A[0 + (ah-1)*aw];
    }
    // bottom-right corner
    else if ( (aw-pw)<=tx && (ah-ph)<=ty ){
        C[(2*pw+tx) + (2*ph+ty)*cw] = A[(aw-1) + (ah-1)*aw];
    }
    // top edge
    if ( ty<ph ){
        C[(pw+tx) + ty*cw] = A[tx + 0*aw];
    }
    // bottom edge
    else if ( ah-ph<=ty ){
        C[(pw+tx) + (2*ph+ty)*cw]=A[tx + (ah-1)*aw];
    }
    // left edge
    else if ( tx<pw ){
        C[(tx) + (ty+ph)*cw] = A[0 + ty*aw];
    }
    // right edge
    else if ( aw-pw<=tx ){
        C[(2*pw+tx) + (ty+ph)*cw] = A[(aw-1) + ty*aw];
    }
}
11,725
#include <stdio.h>
#include <stdlib.h>
#define N 32

// Each thread writes its thread-index-within-block into its global slot,
// so the output shows the block-local index pattern repeating per block.
__global__ void thread_multi(int *t1)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = threadIdx.x;
    t1[i] = j;
}

int main()
{
    int *d_t1;
    int *h_t1;
    int i = 0;
    // fix: the buffer holds ints, so size must be based on sizeof(int)
    // (the original used sizeof(float), which only worked by coincidence
    // of both types being 4 bytes).
    size_t size = N * sizeof(int);

    // Memory allocation (device and host).
    cudaMalloc((void **)&d_t1, size);
    h_t1 = (int *)malloc(size);

    // Initialize the host array.
    for (i = 0; i < N; i++)
        h_t1[i] = 0;

    // Upload, run 4 blocks of N/4 threads, download.
    cudaMemcpy(d_t1, h_t1, size, cudaMemcpyHostToDevice);
    thread_multi<<<4, N / 4>>>(d_t1);
    cudaMemcpy(h_t1, d_t1, size, cudaMemcpyDeviceToHost);

    for (i = 0; i < N; i++)
        printf("%d: %d\n", i, h_t1[i]);

    cudaFree(d_t1);
    free(h_t1);
    printf("\nDone\n");
    return 0;
}
11,726
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>

// Prints every "sum-product number" in [0, range): a number equal to the
// product of its digit sum and its digit product. One thread per candidate;
// threads past `range` exit immediately (tail guard).
__global__ void kernel(uint32_t range)
{
    uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= range) return;   // grid may overshoot range

    uint32_t n = tid;
    uint32_t sum = 0;
    uint32_t prod = 1;
    while (n != 0) {
        uint32_t digit = n % 10;
        n /= 10;
        sum += digit;
        prod *= digit;
    }
    if (sum * prod == tid)
        printf("%u\n", tid);
}

// Launches enough threads to cover [0, range).
// fix: the old sqrt-based configuration capped the block size at 1024
// without increasing the block count, so for range = 16777216 it launched
// 4096 x 1024 = 4,194,304 threads and silently skipped 3/4 of the range.
void checkrange(uint32_t range)
{
    if (range == 0) return;
    uint32_t nbThreads = range < 1024u ? range : 1024u; // max 1024 threads/block
    uint32_t nbBlocks = (range + nbThreads - 1) / nbThreads; // ceil-division
    printf("Checking %u for sum-product numbers\n", range);
    kernel<<<nbBlocks, nbThreads, 0>>>(range);
    cudaDeviceSynchronize();   // wait so device printf output is flushed
}

int main()
{
    // main iteration
    checkrange(1024);
    checkrange(16777216);
    return 0;
}

/*
There is a limit to the number of threads per block, since all threads of a
block are expected to reside on the same processor core and must share the
limited memory resources of that core. On current GPUs, a thread block may
contain up to 1024 threads.
*/
11,727
/* nvcc -arch=sm_35 -o delay delay.cu */
#include <stdio.h>
#include <stdlib.h>
#define MAX_DELAY 30

// Abort-on-error check for the most recent CUDA call.
#define cudaCheckErrors(msg) \
    do { \
      cudaError_t __err = cudaGetLastError(); \
      if (__err != cudaSuccess) { \
        fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
        fprintf(stderr, "*** FAILED - ABORTING\n"); \
        exit(1); \
      } \
    } while (0)

#include <time.h>
#include <sys/time.h>
#define USECPSEC 1000000ULL

// Microseconds since `start` (pass 0 to get a wall-clock timestamp).
unsigned long long dtime_usec(unsigned long long start){
    timeval tv;
    gettimeofday(&tv, 0);
    return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}

#define APPRX_CLKS_PER_SEC 1000000000ULL

// Busy-waits on the device clock for approximately `seconds` seconds
// (assumes ~1 GHz; the actual clock rate varies per GPU).
__global__ void delay_kernel(unsigned seconds){
    unsigned long long dt = clock64();
    while (clock64() < (dt + (seconds*APPRX_CLKS_PER_SEC)));
}

int main(int argc, char *argv[]){
    unsigned delay_t = 2; // seconds, approximately
    // fix: delay_t_r was uninitialized and then read below when no argument
    // was given (undefined behavior); 0 keeps the default of 2 seconds.
    unsigned delay_t_r = 0;
    if (argc > 1) delay_t_r = atoi(argv[1]);
    if ((delay_t_r > 0) && (delay_t_r < MAX_DELAY)) delay_t = delay_t_r;

    unsigned long long difft = dtime_usec(0);
    delay_kernel<<<1,1>>>(delay_t);
    cudaDeviceSynchronize();
    //cudaCheckErrors("kernel fail");
    difft = dtime_usec(difft);
    printf("kernel duration: %fs\n", difft/(float)USECPSEC);
    return 0;
}
11,728
#include "includes.h"

// Empty kernel; presumably a placeholder or a launch-overhead probe.
__global__ void test()
{
}
11,729
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
#include <fstream>
using namespace std;

/**
 * The argument must be a double.
 */
#define GET_TIME(now) { \
   struct timespec time; \
   clock_gettime(CLOCK_MONOTONIC_RAW, &time); \
   now = time.tv_sec + time.tv_nsec/1000000000.0; \
}

/**
 * Checks errors on CUDA calls.
 */
#define CUDA_SAFE_CALL(call) { \
   cudaError_t err = call; \
   if(err != cudaSuccess) { \
       fprintf(stderr,"Erro no arquivo '%s', linha %i: %s.\n",__FILE__, __LINE__,cudaGetErrorString(err)); \
       exit(EXIT_FAILURE); } \
}

// Sequential in-place LU decomposition (Doolittle, no pivoting): after the
// call, A holds L (unit diagonal, below) and U (on/above the diagonal).
// fix: the inner loops previously started at 1 instead of i+1, which is not
// Gaussian elimination and did not match the GPU kernels below (which start
// their indices at i+1).
void luSeq(double *A, int n)
{
    for (int i = 0; i < n-1; i++){
        for (int j = i+1; j < n; j++){
            A[j*n+i] = A[j*n+i]/A[i*n+i];
            for (int k = i+1; k < n; k++){
                A[j*n+k] = A[j*n+k] - A[j*n+i]*A[i*n+k];
            }
        }
    }
}

// Step i, phase 1: divide column i below the pivot by the pivot A[i][i].
// One thread per row j > i.
__global__ void luCalcCol(double *A, int dim, int i)
{
    __shared__ double Aii;   // pivot, loaded once per block
    if (threadIdx.x == 0) {
        Aii = A[i*(dim +1)];
    }
    __syncthreads ();
    int j = blockIdx.x * blockDim.x + threadIdx.x + i + 1;
    if ( j < dim ) {
        A[ j*dim+i ] /= Aii;
    }
}

// Step i, phase 2: rank-1 update of the trailing submatrix
// A[j][k] -= A[j][i] * A[i][k] for j,k > i. The multipliers A[j][i] and the
// pivot row A[i][k] are staged in shared memory (one value per thread row/col;
// assumes blockDim.x, blockDim.y <= 32).
__global__ void luCalcSub(double *A, int dim, int i)
{
    __shared__ double a_ji[32];
    __shared__ double a_ik[32];
    int j = blockDim.x * blockIdx.x + threadIdx.x + i + 1;
    int k = blockDim.y * blockIdx.y + threadIdx.y + i + 1;
    if (( threadIdx.y == 0) && (j < dim)) {
        a_ji[threadIdx.x] = A[ j*dim + i ];
    }
    if (( threadIdx.x == 0) && (k < dim)) {
        a_ik[threadIdx.y] = A[ i*dim + k ];
    }
    __syncthreads ();
    if ((j < dim) && (k < dim)) {
        A[ j*dim + k ] -= a_ji[threadIdx.x] * a_ik[threadIdx.y];
    }
}

// GPU LU decomposition: for each pivot step, launch the column-scaling kernel
// then the trailing-submatrix update.
void luGPU(double *A, int n, int blockSize)
{
    int i, n_blocos;
    for (i = 0; i < n-1; i++) {
        n_blocos = ((n-i-1) + blockSize -1) / blockSize;
        dim3 g_blocos(n_blocos, n_blocos);
        dim3 n_threads(blockSize,blockSize);
        luCalcCol <<< n_blocos, blockSize >>>(A, n, i);
        CUDA_SAFE_CALL(cudaGetLastError());
        luCalcSub <<< g_blocos, n_threads >>>(A, n, i);
        CUDA_SAFE_CALL(cudaGetLastError());
    }
}

// Placeholder for partial-pivoting variant.
// NOTE(review): currently identical to luGPU — no pivoting is performed.
void luGPUPivot(double *A, int n, int blockSize)
{
    luGPU(A, n, blockSize);
}

// Placeholder for scaled-pivoting variant.
// NOTE(review): currently identical to luGPU — no pivoting is performed.
void luGPUPivotEscal(double *A, int n, int blockSize)
{
    luGPU(A, n, blockSize);
}

// Fills A with A[i][j] = (i+1)*(j+1) (a rank-1 test matrix).
void fillMatrix(double* A, int n){
    for (int i=0; i<n; i++){
        for (int j=0; j<n; j++){
            A[i*n+j] = (i+1)*(j+1);
        }
    }
}

// Compares two matrices element-wise; aborts on the first mismatch > 1e-5.
void checkResults(double *mat1, double *mat2, int n){
    for (int i=0; i<n; i++) {
        for (int j=0; j<n; j++) {
            if (fabs(mat1[i*n+j] - mat2[i*n+j]) > 1e-5) {
                cerr << "Resultado incorreto em " << i << " x " << j << " -> " << mat1[i*n+j] << " " << mat2[i*n+j] << endl;
                exit(EXIT_FAILURE);
            }
        }
    }
}

// Prints timings as a semicolon-separated line: n;seq;h2d;kernel;d2h.
void printResults(int n, double timeSeq, double timeCpuGpu, double timeRunPar, double timeGpuCpu){
    cout << n << ";" << timeSeq << ";" << timeCpuGpu << ";" << timeRunPar << ";" << timeGpuCpu << endl;
}

int main(int argc, char** argv)
{
    int n=0, blockSize;
    double *Aseq, *Apar, *Adevice;
    // fix: timings are zero-initialized — previously, whichever of the
    // sequential/parallel timings was not measured was printed uninitialized.
    double begin, end, timeSeq = 0, timeCpuGpu = 0, timeRunPar = 0, timeGpuCpu = 0;
    char *inputFileName, *outputFileName;

    if(argc < 3) {
        cerr << "Digite: "<< argv[0] <<" <Arquivo de entrada> <Arquivo de saída> [Dimensão do bloco] [p|e]" << endl;
        exit(EXIT_FAILURE);
    }
    inputFileName = argv[1];
    outputFileName = argv[2];
    (void)outputFileName;   // output writing is currently disabled (see below)

    // Binary input: an int n followed by n*n doubles (row-major).
    ifstream infile (inputFileName, ios::binary);
    infile.read(reinterpret_cast<char *>(&n), sizeof(int));
    size_t matBytes = n*n*sizeof(double);
    Aseq = (double *) malloc(matBytes);
    if ( Aseq == NULL ) {
        cerr << "Memoria insuficiente" << endl;
        exit(EXIT_FAILURE);
    }
    infile.read(reinterpret_cast<char *>(Aseq), matBytes);
    infile.close();

    if (argc > 3){
        blockSize = atol(argv[3]);
        Apar = (double *) malloc(matBytes);
        if ( Apar == NULL ) {
            cerr << "Memoria insuficiente" << endl;
            exit(EXIT_FAILURE);
        }
        GET_TIME(begin);
        CUDA_SAFE_CALL(cudaMalloc((void**) &Adevice, matBytes));
        // fix: this is the host-to-device upload; the original copied
        // device -> host (wrong direction AND wrong argument order), so the
        // kernels ran on uninitialized device memory.
        CUDA_SAFE_CALL(cudaMemcpy(Adevice, Aseq, matBytes, cudaMemcpyHostToDevice));
        GET_TIME(end);
        timeCpuGpu = end-begin;

        if (argc > 4){
            if (argv[4][0] == 'e'){
                cout << "PIVOTEAMENTO ESCALONADO" << endl;
                GET_TIME(begin);
                luGPUPivotEscal(Adevice, n, blockSize);
                GET_TIME(end);
            }else{
                cout << "PIVOTEAMENTO BÁSICO" << endl;
                GET_TIME(begin);
                luGPUPivot(Adevice, n, blockSize);
                GET_TIME(end);
            }
        } else {
            cout << "PARALELO" << endl;
            GET_TIME(begin);
            luGPU(Adevice, n, blockSize);
            GET_TIME(end);
        }
        timeRunPar = end-begin;

        GET_TIME(begin);
        CUDA_SAFE_CALL(cudaMemcpy(Apar, Adevice, matBytes, cudaMemcpyDeviceToHost));
        GET_TIME(end);
        timeGpuCpu = end-begin;
        CUDA_SAFE_CALL(cudaFree(Adevice));
        free(Apar);
    } else {
        cout << "SEQUENCIAL" << endl;
        GET_TIME(begin);
        luSeq(Aseq, n);
        GET_TIME(end);
        timeSeq = end-begin;
    }
    free(Aseq);
    // checkResults(Aseq, Apar, n);
    // ofstream outfile (outputFileName, ios::binary);
    // outfile.write(reinterpret_cast<char *>(&n), sizeof(int));
    // outfile.write(reinterpret_cast<char *>(Aseq), matBytes);
    // outfile.close();
    printResults(n, timeSeq, timeCpuGpu, timeRunPar, timeGpuCpu);
    CUDA_SAFE_CALL(cudaDeviceReset());
    exit(EXIT_SUCCESS);
}
11,730
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<math.h>
#define N 10

// Sums the N elements of a into o[0].
// fix: the previous version performed overlapping in-place additions
// (a[id] += a[id+i]) with no synchronization, which is not a valid reduction
// and raced across threads. For N this small, a single-thread serial sum is
// correct and leaves `a` untouched (so it can be reused by the caller).
__global__ void sum(double *a, double *o)
{
    if (threadIdx.x == 0) {
        double total = 0.0;
        for (int i = 0; i < N; i++)
            total += a[i];
        o[0] = total;
    }
}

// Replaces each element by its squared deviation from the mean:
// a[id] = (a[id] - avg)^2.
__global__ void standardDeviation(double *a, double avg)
{
    int id = threadIdx.x;
    if (id < N) {
        a[id] -= avg;
        a[id] = a[id] * a[id];
    }
}

int main()
{
    double *h_a, *d_a, *oh_a, *od_a, *d_a1;
    int size = N * sizeof(double);
    h_a = (double *)malloc(size);
    oh_a = (double *)malloc(size);
    cudaMalloc(&d_a, size);
    cudaMalloc(&d_a1, size);
    cudaMalloc(&od_a, size);

    int i;
    for (i = 0; i < N; i++) {
        h_a[i] = random() % N;
    }
    printf("\n\nNumbers =>");
    for (i = 0; i < N; i++) {
        printf("%lf ", h_a[i]);
    }

    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_a1, h_a, size, cudaMemcpyHostToDevice);

    sum<<<1, N/2>>>(d_a, od_a);
    cudaMemcpy(oh_a, od_a, size, cudaMemcpyDeviceToHost);
    printf("\n\nSum => %lf", oh_a[0]);

    // fix: the mean is kept in double (it was previously truncated to float
    // before being fed back into the deviation kernel).
    double arithmeticMean = oh_a[0] / N;
    printf("\n\nArithmetic Mean => %f", arithmeticMean);

    // Square the deviations in-place, then sum them for the variance.
    standardDeviation<<<1, N>>>(d_a1, arithmeticMean);
    sum<<<1, N/2>>>(d_a1, od_a);
    cudaMemcpy(oh_a, od_a, size, cudaMemcpyDeviceToHost);
    double temp = oh_a[0] / N;   // population variance
    printf("\n\nStandard Deviation => %lf\n\n", sqrt(temp));

    cudaFree(d_a);
    cudaFree(od_a);
    cudaFree(d_a1);
    free(h_a);
    free(oh_a);
    return 0;
}
11,731
#include "includes.h"

// Element-wise sum of two N x N matrices: C = A + B.
// 2D launch, one thread per element; threads mapped outside the matrix
// do nothing.
__global__ void MatrixAdd_d( float *A, float *B, float *C, int N )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if ( row >= N || col >= N )
        return;
    int idx = row * N + col;
    C[ idx ] = A[ idx ] + B[ idx ];
}
11,732
#include <stdio.h>

// Minimal device-printf demo: each launched thread prints one greeting.
__global__ void myKernel()
{
    printf("Hello, world from the device!\n");
}

int main()
{
    // Single-thread launch; synchronize so the device printf buffer is
    // flushed before the program exits.
    myKernel<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}
11,733
#include "stdio.h"
#include "assert.h"
#include "math.h"
#include <iostream>
using namespace std;
#define N 100000
#define BLOCK_SIZE 1024
#define MAX_ERR 1e-6

// Element-wise vector addition: c[i] = a[i] + b[i] for i < count.
// Guarded so the partial last block does not write out of bounds.
__global__ void add(int *a, int *b, int *c, int count)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < count) {
        c[idx] = a[idx] + b[idx];
    }
}

int main()
{
    int *ha, *hb, *hc;
    int *da, *db, *dc;
    ha = (int *)malloc(sizeof(int) * N);
    hb = (int *)malloc(sizeof(int) * N);
    hc = (int *)malloc(sizeof(int) * N);
    for (int i = 0; i < N; i++) {
        ha[i] = -i;
        hb[i] = i * i;
    }
    cudaMalloc((void **)&da, sizeof(int) * N);
    cudaMalloc((void **)&db, sizeof(int) * N);
    cudaMalloc((void **)&dc, sizeof(int) * N);
    cudaMemcpy(da, ha, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, sizeof(int) * N, cudaMemcpyHostToDevice);
    // fix: ceil-division grid size; the old (N + BLOCK_SIZE) / BLOCK_SIZE
    // launched one superfluous block whenever N was a multiple of BLOCK_SIZE.
    add<<<(N + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(da, db, dc, N);
    cudaMemcpy(hc, dc, sizeof(int) * N, cudaMemcpyDeviceToHost);
    // Verify against the CPU result.
    for (int i = 0; i < N; i++) {
        assert(abs(hc[i] - ha[i] - hb[i]) < MAX_ERR);
    }
    cout << "passed" << endl;
    free(ha);
    free(hb);
    free(hc);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return 0;
}
11,734
#include <stdio.h>
//#include <cuda.h>

// Enumerates every CUDA device and prints its general, memory and thread
// properties. fix: several printf format specifiers were wrong — %ld was used
// for `int` fields (maxTexture1D, regsPerBlock, warpSize) and for `size_t`
// fields; they are now %d and %zu respectively.
int main(void)
{
    cudaDeviceProp prop; //cudaDeviceProp is a structure, prop is a structure variable.
    int count;
    cudaGetDeviceCount (&count);
    for (int i=0; i< count; i++){
        cudaGetDeviceProperties (&prop, i);
        printf("\n****************************Device %d*************************************\n",i);
        printf("----------General Information----------\n");
        printf("Name: %s\n", prop.name);
        printf("Compute capability: %d.%d\n", prop.major, prop.minor);
        // clockRate is reported in kHz, hence /1000 for MHz.
        printf("Clock rate: %d MHz\n", (prop.clockRate/1000));
        printf("GPU type: ");
        if (prop.integrated)
            printf ("Integrated GPU\n");
        else
            printf ("Discreted GPU\n");
        printf("Device copy overlap: ");
        if (prop.deviceOverlap)
            printf ("Enabled\n");
        else
            printf ("Disabled\n");
        printf( "Kernel execution timeout: ");
        if (prop.kernelExecTimeoutEnabled)
            printf ("Enabled\n");
        else
            printf ("Disabled\n");
        printf( "Mapping Host Memory into CUDA Device Address Space: ");
        if (prop.canMapHostMemory)
            printf ("Enabled\n");
        else
            printf ("Disabled\n");
        printf( "Executing Multiple Kernels Support: ");
        if (prop.concurrentKernels)
            printf ("Enabled\n");
        else
            printf ("Disabled\n");
        printf( "Device Computing Mode: ");
        if (prop.computeMode == 0) printf("Default\n");
        if (prop.computeMode == 1) printf("Exclusive\n");
        if (prop.computeMode == 2) printf("Prohibited\n");

        printf( "\n----------Memory Information for device----------\n");
        printf( "Total Global Memory: %zu MB\n", (prop.totalGlobalMem/1024/1024));
        printf( "Total Constant Memory: %zu kB\n", (prop.totalConstMem/1024));
        printf( "Max Memory Pitch Allowed for Memory Copies: %zu MB\n", (prop.memPitch/1024/1024));
        printf( "Texture Alignment: %zu B\n", prop.textureAlignment );
        printf( "Max Size Supported for 1D Textures: %d\n", prop.maxTexture1D);
        printf( "Max Dimensions Supported for 2D Texture: (%d, %d)\n", prop.maxTexture2D[0], prop.maxTexture2D[1]);
        printf( "Max Dimensions Supported for 3D Texture: (%d, %d, %d)\n", prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]);

        printf( "\n----------Thread Information for device----------\n");
        printf( "Streaming Multiprocessor count: %d\n", prop.multiProcessorCount);
        printf( "Shared Memory per Block: %zu KB\n", (prop.sharedMemPerBlock/1024));
        printf( "Registers per Block: %d\n", prop.regsPerBlock);
        printf( "Threads in warp: %d\n", prop.warpSize);
        printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock);
        printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("*****************************************************************\n");
    }
    return 0;
}
11,735
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Kernel: each thread echoes its local thread id, global id, and the array
// value it was handed, demonstrating a host-to-device transfer.
__global__ void memTransfer(int* input)
{
    const int gID = blockIdx.x * blockDim.x + threadIdx.x;
    printf("tid = %d, gid = %d, value = %d\n", threadIdx.x, gID, input[gID]);
}

// Host: fill 128 random bytes, copy them to the device, and launch
// 2 blocks of 64 threads so every element is printed once.
int main()
{
    const int size = 128;
    const int byteSize = size * sizeof(int);

    int* hostBuf = (int*)malloc(byteSize);

    time_t seed;
    srand((unsigned)time(&seed));
    for (int idx = 0; idx < size; ++idx)
        hostBuf[idx] = (int)(rand() & 0xff);  // restrict values to one byte

    int* devBuf;
    cudaMalloc((void**)&devBuf, byteSize);
    cudaMemcpy(devBuf, hostBuf, byteSize, cudaMemcpyHostToDevice);

    const dim3 block(64);
    const dim3 grid(2);
    memTransfer<<<grid, block>>>(devBuf);
    cudaDeviceSynchronize();  // wait so device printf output is flushed

    cudaFree(devBuf);
    free(hostBuf);
    cudaDeviceReset();
    return 0;
}
11,736
#include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/

//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
// NOTE(review): the three lines above look like they describe a different
// (basis-transform) kernel; the code below is a shared-memory summation over
// `diag` -- confirm against the upstream quantumsim source.
//
// Sums blockDim.x entries of `diag` with a Hillis-Steele-style inclusive scan
// in shared memory. The scan deliberately skips the stride equal to
// `mask = 1 << bit`, so the last shared entries simultaneously hold the full
// sum and a partial sum excluding that stride's contributions:
//   - thread 0 writes the total into diag[blockIdx.x]
//   - thread 1 (only when bit >= 0) writes s_diag[blockDim.x - 1 - mask],
//     presumably the partial trace over the `bit`=0 subspace -- TODO confirm.
// Requires dynamic shared memory of blockDim.x * sizeof(double).
__global__ void trace(double *diag, int bit)
{
    unsigned int x = threadIdx.x;        // one thread per diagonal entry
    unsigned int mask = 0;
    if(bit >= 0) {
        mask = 1 << bit;                 // scan stride to skip; 0 disables skipping
    }

    extern __shared__ double s_diag[];   // dynamic shared scratch, blockDim.x doubles

    // NOTE(review): every block loads diag[threadIdx.x] (no blockIdx offset),
    // yet results are written per block -- presumably launched with a single
    // block, or the caller offsets `diag`; verify at the call site.
    s_diag[x] = diag[x];
    __syncthreads();

    double a;
    for(unsigned int i=1; i < blockDim.x; i <<= 1) {
        // Two-phase read-then-write with barriers between them avoids a race
        // on s_diag while different threads read/update overlapping slots.
        if(i != mask && i <= x) {
            a = s_diag[x-i];
        }
        __syncthreads();
        if(i != mask && i <= x) {
            s_diag[x] += a;
        }
        __syncthreads();
    }
    __syncthreads();

    //copy result back
    if(x == 0) {
        diag[blockIdx.x] = s_diag[blockDim.x - 1];
        return;
    }
    if(x == 1 && bit >= 0) {
        diag[blockIdx.x + 1] = s_diag[blockDim.x - 1 - mask];
        return;
    }
}
11,737
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>

// Runtime parameters (settable from the command line via setParameters).
int MAX_PARTICLES;
int NUM_ITERATIONS;
int TPB;               // threads per block
float DEC_FACTOR;      // per-step velocity decay factor
float TOLERANCE = 1e-6;

typedef struct {
    float3 position;
    float3 velocity;
} Particle;

// One simulation step on the device: decay each particle's velocity, then
// advance its position by that velocity. One thread per particle.
__global__ void timestepGPU(Particle* array, int nPart, float dec_fact)
{
    int myId = blockIdx.x * blockDim.x + threadIdx.x;
    if (myId < nPart) {  // guard the grid tail
        array[myId].velocity.x = array[myId].velocity.x * dec_fact;
        array[myId].velocity.y = array[myId].velocity.y * dec_fact;
        array[myId].velocity.z = array[myId].velocity.z * dec_fact;
        array[myId].position.x = array[myId].position.x + array[myId].velocity.x;
        array[myId].position.y = array[myId].position.y + array[myId].velocity.y;
        array[myId].position.z = array[myId].position.z + array[myId].velocity.z;
    }
}

// CPU reference implementation of the same step, over all particles.
void timestepCPU(Particle* array)
{
    for (int i = 0; i < MAX_PARTICLES; i++) {
        array[i].velocity.x = array[i].velocity.x * DEC_FACTOR;
        array[i].velocity.y = array[i].velocity.y * DEC_FACTOR;
        array[i].velocity.z = array[i].velocity.z * DEC_FACTOR;
        array[i].position.x = array[i].position.x + array[i].velocity.x;
        array[i].position.y = array[i].position.y + array[i].velocity.y;
        array[i].position.z = array[i].position.z + array[i].velocity.z;
    }
}

// Returns 1 when every component of every particle agrees within TOLERANCE.
// Fix: the old code compared the SIGNED difference (x - y < TOLERANCE), which
// accepts arbitrarily large negative deviations; use the absolute difference.
int compare(Particle* x, Particle* y)
{
    int value = 1;
    for (int i = 0; i < MAX_PARTICLES && value; i++) {
        value = value && (fabsf(x[i].position.x - y[i].position.x) < TOLERANCE);
        value = value && (fabsf(x[i].position.y - y[i].position.y) < TOLERANCE);
        value = value && (fabsf(x[i].position.z - y[i].position.z) < TOLERANCE);
        value = value && (fabsf(x[i].velocity.x - y[i].velocity.x) < TOLERANCE);
        value = value && (fabsf(x[i].velocity.y - y[i].velocity.y) < TOLERANCE);
        value = value && (fabsf(x[i].velocity.z - y[i].velocity.z) < TOLERANCE);
    }
    return value;
}

// Fill an array with uniform random positions/velocities in [0, 1].
void initArray(Particle* p)
{
    for (int i = 0; i < MAX_PARTICLES; i++) {
        p[i].position.x = (float)rand() / RAND_MAX;
        p[i].position.y = (float)rand() / RAND_MAX;
        p[i].position.z = (float)rand() / RAND_MAX;
        p[i].velocity.x = (float)rand() / RAND_MAX;
        p[i].velocity.y = (float)rand() / RAND_MAX;
        p[i].velocity.z = (float)rand() / RAND_MAX;
    }
}

// Wall-clock time in seconds with microsecond resolution.
double cpuSecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}

// Parse positional arguments; missing trailing arguments keep the defaults.
// Fix: defaults are now assigned FIRST, so e.g. passing only the particle
// count no longer leaves NUM_ITERATIONS/TPB/DEC_FACTOR at zero.
void setParameters(int argc, char** argv)
{
    MAX_PARTICLES = 100000;
    NUM_ITERATIONS = 100;
    TPB = 256;
    DEC_FACTOR = 0.9;
    switch (argc) {  // deliberate fallthrough: each case overrides one default
        case 5: DEC_FACTOR = atof(argv[4]);
        case 4: TPB = atoi(argv[3]);
        case 3: NUM_ITERATIONS = atoi(argv[2]);
        case 2: MAX_PARTICLES = atoi(argv[1]);
            break;
        default:
            break;
    }
}

int main(int argc, char **argv)
{
    setParameters(argc, argv);
    // Input parameters:
    //   [1] number of particles, [2] number of iterations,
    //   [3] threads per block, [4] velocity decay factor (all optional)
    double iStart, iElapsCPU, iElapsGPU;

    // Reference copy, CPU working copy, pinned host copy for the GPU path.
    Particle* pOriginal = (Particle*)malloc(MAX_PARTICLES * sizeof(Particle));
    initArray(pOriginal);
    Particle* pCPU = (Particle*)malloc(MAX_PARTICLES * sizeof(Particle));
    memcpy(pCPU, pOriginal, MAX_PARTICLES * sizeof(Particle));
    Particle* pForeign;  // pinned so the H2D/D2H copies are fast
    cudaHostAlloc(&pForeign, MAX_PARTICLES * sizeof(Particle), cudaHostAllocDefault);
    memcpy(pForeign, pOriginal, MAX_PARTICLES * sizeof(Particle));
    Particle* pGPU;
    cudaMalloc(&pGPU, MAX_PARTICLES * sizeof(Particle));

    // CPU timing loop
    iStart = cpuSecond();
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        timestepCPU(pCPU);
    }
    iElapsCPU = cpuSecond() - iStart;

    // GPU timing loop (includes the per-iteration transfers on purpose)
    iStart = cpuSecond();
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        cudaMemcpy(pGPU, pForeign, MAX_PARTICLES * sizeof(Particle), cudaMemcpyHostToDevice);
        timestepGPU<<<(MAX_PARTICLES + TPB - 1) / TPB, TPB>>>(pGPU, MAX_PARTICLES, DEC_FACTOR);
        cudaMemcpy(pForeign, pGPU, MAX_PARTICLES * sizeof(Particle), cudaMemcpyDeviceToHost);
    }
    cudaDeviceSynchronize();
    iElapsGPU = cpuSecond() - iStart;

    // Report and verify
    printf("\nSize of the array: %d\nTPB: %d\n", MAX_PARTICLES, TPB);
    printf("CPU time: %2f\nGPU time: %2f\n", iElapsCPU, iElapsGPU);
    int comp = compare(pForeign, pCPU);
    if (comp) {
        //printf("Both arrays are equal\n");
    } else {
        printf("Differences between arrays\n");
    }

    // Fix: release all allocations (previously leaked).
    cudaFree(pGPU);
    cudaFreeHost(pForeign);
    free(pCPU);
    free(pOriginal);
    return 0;
}
11,738
#include "includes.h"

// Converts interleaved RGB triples in `ms` to grayscale, writing the same
// gray value into all three channels of `aux`. Thread i handles row i of an
// n x n byte layout (n must be a multiple of the 3-byte pixel stride for the
// last pixel to be complete).
//
// Fixes:
//  - the green coefficient was mistyped as 0.5876; the standard BT.601 luma
//    weights are 0.299 R + 0.587 G + 0.114 B
//  - the loop bound `k < n-3` skipped the final pixel of each row; `k + 2 < n`
//    processes every complete RGB triple
__global__ void grayscaleKernel(int *ms, int *aux, int n)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n) {  // one thread per row
        for (int k = 0; k + 2 < n; k += 3) {
            int grayscale = 0.299 * ms[i * n + k]
                          + 0.587 * ms[i * n + k + 1]
                          + 0.114 * ms[i * n + k + 2];
            aux[i * n + k] = aux[i * n + k + 1] = aux[i * n + k + 2] = grayscale;
        }
    }
}
11,739
#include <cuda.h>
#include <cmath>
#include <iostream>
#include <memory>
#include <limits>
#include <cassert>

// Each thread accumulates n_per samples of sin(x)/x starting at its own
// sub-interval offset (temp = sub-interval width per thread, step = sample
// spacing) and stores its partial sum into sum_array[threadIdx.x].
// Launched as <<<1, n_threads>>>, so threadIdx.x indexes sum_array directly.
__global__ void function(float a, int n_per, double step, float temp, double * sum_array){
    double sum_part = 0.0;
    double x = a + (temp * (threadIdx.x));   // this thread's starting abscissa
    for(int i = 0; i < n_per; i++){
        if(x != 0.0){                        // sin(x)/x undefined at 0; sample is skipped
            double val = sin(x)/x;
            sum_part += val;
        }
        x += step;
    }
    sum_array[threadIdx.x] = sum_part;       // one partial sum per thread
    __syncthreads();                         // last statement; has no observable effect
}

// Trapezoidal-rule style estimate of the integral of sin(x)/x over [a, b]
// with n samples split across n_threads GPU threads.
// Usage: prog <a> <b> <n> <n_threads>
int main(int argc, char **argv){
    cudaError_t rv;
    rv = cudaDeviceReset();
    assert(rv == cudaSuccess);
    // NOTE(review): EINVAL is used but <cerrno> is not included directly;
    // it presumably arrives transitively -- confirm on the target toolchain.
    if(argc != 5) {std::cerr<< "Incorrect number of arguments" << std::endl; return EINVAL;};
    float a = std::stod(argv[1]);            // lower bound (narrowed to float)
    float b = std::stod(argv[2]);            // upper bound
    int n = atoi(argv[3]);                   // total number of samples
    int n_threads = std::stoull(argv[4]);    // threads in the single block
    if(n_threads < 1) {std::cerr << "Incorrect number of arguments" << std::endl; return EINVAL;};
    //Here the number of steps per thread is calculated and the size of each subsection is also calculated and set to temp
    float temp = std::abs((b-a)) / n_threads;
    double step = (b-a)/n;
    int n_per = n / n_threads;               // NOTE(review): remainder samples (n % n_threads) are dropped
    //create sum on global mem
    double sum = 0.0;
    double *sum_array;
    rv = cudaMalloc(&sum_array, n_threads * sizeof(double));
    assert(rv == cudaSuccess);
    double *sum_temp = (double *)malloc(n_threads * sizeof(double));
    for(int i = 0; i < n_threads; i++){
        sum_temp[i] = 0.0;
    }
    cudaMemcpy(sum_array, sum_temp, n_threads * sizeof(double), cudaMemcpyHostToDevice);
    //and have it set to 0
    //cuda kernel call
    function<<<1, n_threads>>>(a, n_per, step, temp, sum_array);
    cudaMemcpy(sum_temp, sum_array, n_threads*sizeof(double), cudaMemcpyDeviceToHost);
    for(int i = 0; i < n_threads; i++){
        sum += sum_temp[i];
    }
    //Here the different values of the trapezoidal rule are calculated to give the result as "answer"
    // NOTE(review): the kernel already sums the endpoint samples at full
    // weight, and half-weights are ADDED here rather than subtracted --
    // looks like the endpoints are counted 1.5x; verify the intended formula.
    double val2 = 0.0;
    if(a != 0) val2 = (sin(a)/a);
    double val3 = 0.0;
    if(b != 0) val3 = (sin(b)/b);
    val3 = val3 / 2;
    val2 = val2 / 2;
    typedef std::numeric_limits< double > dbl;
    std::cout.precision(dbl::max_digits10);  // print full double precision
    double answer = step * (val2 + sum + val3);
    std::cout << answer << std::endl;
    rv = cudaFree(sum_array);
    assert(rv == cudaSuccess);
    free(sum_temp);
    return 0;
}
11,740
#include "includes.h"

// SIRT per-pixel update for projection `p`: for each detector pixel d, add
// (measured - current ray sum) uniformly along the d-th row of `recon`
// (nx entries). Grid-stride loop over the dx detector pixels.
__global__ void cuda_sirt_pixels_kernel(int p, int nx, int dx, float* recon, const float* data)
{
    const int start  = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;

    for (int d = start; d < dx; d += stride)
    {
        float* row = recon + d * nx;

        // current forward projection of this row
        float raySum = 0.0f;
        for (int i = 0; i < nx; ++i)
            raySum += row[i];

        // residual between the measurement and the current estimate
        const float correction = data[p * dx + d] - raySum;

        // back-project the residual uniformly along the row
        for (int i = 0; i < nx; ++i)
            row[i] += correction;
    }
}
11,741
#include<stdio.h>
#include<stdlib.h>

// Each launched thread prints its thread and block index.
__global__ void say_hello_gpu(void)
{
    printf("hello from gpu %d, %d\n", threadIdx.x, blockIdx.x);
}

// Host greeting, then a 2-block x 2-thread launch; synchronize so the
// device-side printf output is flushed before exit.
int main(void)
{
    printf("hello from cpu\n");

    const dim3 grid(2);
    const dim3 block(2);
    say_hello_gpu<<<grid, block>>>();
    cudaDeviceSynchronize();

    return 0;
}
11,742
/*
 * PROJECT: Pairwise sequence alignments on GPU
 * FILE: psa_swgotoh_ref_2b_gpu
 * AUTHOR(S): Alejandro Chacon <alejandro.chacon@uab.es>
 * DESCRIPTION: Device functions for the SW-Gotoh GPU implementation using:
 *  (A) pack 4 SW in the same register using native video instructions.
 *  (B) 32bit video resources + and store the temporal columns with 8 bits.
 *  (C) bases are represented using 2 bits/base.
 *  (D) reads the candidates from the reference (check the impact of random accesses)
 */

extern "C" {
#include "../../include/psa_pairwise_gpu.h"
}
#include <cuda_runtime.h>
#include <cuda.h>

#ifndef QUERIES_SIZE
#define QUERIES_SIZE 100
#endif
#ifndef CANDIDATES_SIZE
#define CANDIDATES_SIZE 120
#endif

#define MAX3(a,b,c) (MAX(MAX(a, b), c))

#define WARP_SIZE 32
#define MAX_THREADS_PER_SM 64
#define CUDA_NUM_THREADS 64
#define THREADS_PER_SEGMENT 32
#define NUM_SW_PER_BLOCK (MAX_THREADS_PER_SM / THREADS_PER_SEGMENT)
#define NUM_WARPS (MAX_THREADS_PER_SM / WARP_SIZE)

// Width of the anti-diagonal band processed per outer iteration.
#define BAND_LEN 8
#define MAX_QUERY_SIZE QUERIES_SIZE
// Packed bases per 32-bit word (UINT32_LENGTH / RAW_4B_LENGTH come from the
// project header; presumably 32 / 4 = 8 -- confirm against psa_pairwise_gpu.h).
#define RAW_BASES_PER_ENTRY (UINT32_LENGTH / RAW_4B_LENGTH)

typedef int32_t score_type;
//typedef char2 score_type2;
//typedef int2 score_type2;
typedef short2 score_type2;   // (H, E) pair stored per query row in `temp`

// Advances one query row `idRow` across a BAND_LEN-wide column band of the
// Gotoh DP matrices. H_band/F_band hold the running H and F values for the
// band, `temp` carries (H, E) across bands, and H_maxScore tracks the best
// local score seen (scores are clamped at 0, i.e. local alignment).
inline __device__ void update_band(int32_t idRow, char q_i, char *ref_cache, score_type *H_band,
                                   score_type *F_band, score_type2 *temp, score_type *H_maxScore,
                                   const score_type MATCH_SCORE, const score_type MISMATCH_SCORE,
                                   const score_type OPEN_INDEL_SCORE, const score_type EXTEND_INDEL_SCORE)
{
    score_type H_diag = H_band[0];     // H value on the diagonal
    H_band[0] = temp[idRow].x;         // restore H carried in from the previous band
    score_type E = temp[idRow].y;      // restore E (gap-in-query) likewise

    #pragma unroll
    for (uint32_t j = 1; j <= BAND_LEN; ++j)
    {
        // update F (gap-in-reference): extend vs. open
        const score_type ftop = F_band[j] + EXTEND_INDEL_SCORE;
        const score_type htop = H_band[j] + OPEN_INDEL_SCORE;
        F_band[j] = MAX(ftop, htop);

        // update E (gap-in-query): extend vs. open
        const score_type eleft = E + EXTEND_INDEL_SCORE;
        const score_type hleft = H_band[j-1] + OPEN_INDEL_SCORE;
        E = MAX(eleft, hleft);

        // match/mismatch from the diagonal
        const char r_j = ref_cache[j-1];
        const score_type diagonal = (r_j == q_i) ? H_diag + MATCH_SCORE : H_diag + MISMATCH_SCORE;

        const score_type top  = F_band[j];
        const score_type left = E;
        score_type hi = MAX3(left, top, diagonal);
        hi = MAX(hi, 0);                // local alignment: never below zero
        H_diag = H_band[j];
        H_band[j] = hi;

        (*H_maxScore) = MAX((*H_maxScore), hi);
    }

    // save the last entry of the band for the next band over this row
    temp[idRow] = make_short2(H_band[BAND_LEN], E);
}

// One thread aligns one (query, candidate) pair with banded SW-Gotoh over
// 2-bit-packed sequences read straight from the reference.
__global__ void localProcessSWTiling(RAWHlfEntry_t *d_refHlfRaw, uint32_t *d_refPosition,
                                     RAWHlfEntry_t *d_QueriesHlfRaw, uint32_t *d_QueriesHlfRAWposition,
                                     alignmentInfo_t *d_AlignmentsInfo, alignmentEntry_t *d_AlignmentsResults,
                                     uint32_t querySize, uint32_t candidateSize, uint32_t candidatesNum)
{
    const uint32_t idCandidate = blockIdx.x * MAX_THREADS_PER_SM + threadIdx.x;

    if (idCandidate < candidatesNum)
    {
        const uint32_t refPosition = d_refPosition[idCandidate];
        // entry containing the candidate's first base
        const RAWHlfEntry_t* candidate = d_refHlfRaw + (refPosition / RAW_BASES_PER_ENTRY);
        const RAWHlfEntry_t* query = d_QueriesHlfRaw + d_QueriesHlfRAWposition[d_AlignmentsInfo[idCandidate]];

        // All the scores have to be absolute numbers:
        // Original BWA Scores: Gap_Ex = -1; Gap_Op = -2; Match = 2; Miss = -5;
        const score_type MATCH_SCORE        =  2;
        const score_type MISMATCH_SCORE     = -5;
        const score_type OPEN_INDEL_SCORE   = -2;
        const score_type EXTEND_INDEL_SCORE = -1;
        const score_type ZERO = 0;

        char        r_cache[BAND_LEN];        // unpacked reference bases for the current band
        score_type2 temp  [MAX_QUERY_SIZE];   // per-row (H, E) carried between bands
        score_type  H_band[BAND_LEN + 1];
        score_type  F_band[BAND_LEN + 1];

        const int32_t numRows = querySize, numColumns = candidateSize;
        int32_t idColumn, idRow, idBand;
        score_type H_maxScore = ZERO;
        uint32_t entryCandidate, actualEntryCandidate, lastEntryCandidate, idCandidateEntry = 0;

        for(idBand = 0; idBand < MAX_QUERY_SIZE; ++idBand){
            temp[idBand].x = ZERO;
            temp[idBand].y = ZERO;
        }

        lastEntryCandidate = candidate[idCandidateEntry];   // prime with entry 0

        // Compute Score SW-GOTOH
        for(idColumn = 0; idColumn < numColumns; idColumn += BAND_LEN){

            uint32_t entryQuery, idQueryEntry = 0;

            // Refill the packed candidate window once per reference entry,
            // realigning for a refPosition that is not entry-aligned.
            if((idColumn % RAW_BASES_PER_ENTRY) == 0){
                const uint32_t alignment = refPosition % RAW_BASES_PER_ENTRY;
                idCandidateEntry++;
                actualEntryCandidate = candidate[idCandidateEntry];
                entryCandidate = actualEntryCandidate << (RAW_4B_LENGTH * (RAW_BASES_PER_ENTRY - alignment)) | lastEntryCandidate >> (RAW_4B_LENGTH * alignment);
                lastEntryCandidate = actualEntryCandidate;
            }

            // Load a block of entries from the reference
            #pragma unroll
            for (uint32_t idBand = 0; idBand < BAND_LEN; ++idBand){
                r_cache[idBand] = entryCandidate & 0x3;   // low 2 bits = one base
                entryCandidate >>= RAW_4B_LENGTH;
            }

            // Initialize the first band
            #pragma unroll
            for (uint32_t idBand = 0; idBand <= BAND_LEN; ++idBand){
                H_band[idBand] = ZERO;
                F_band[idBand] = ZERO;
            }

            #pragma unroll 4
            for(idRow = 0; idRow < numRows; ++idRow){
                // NOTE(review): at idRow == 0 this shifts `entryQuery` before
                // its first assignment (harmless here because the branch below
                // immediately overwrites it on the same iteration, but it is
                // formally an uninitialized read -- worth cleaning upstream).
                entryQuery >>= RAW_4B_LENGTH;
                if((idRow % RAW_BASES_PER_ENTRY) == 0){
                    entryQuery = query[idQueryEntry];
                    idQueryEntry++;
                }
                update_band(idRow, entryQuery & 0x03, r_cache, H_band, F_band, temp, &H_maxScore,
                            MATCH_SCORE, MISMATCH_SCORE, OPEN_INDEL_SCORE, EXTEND_INDEL_SCORE);
            }
        }

        d_AlignmentsResults[idCandidate].score  = H_maxScore;
        d_AlignmentsResults[idCandidate].column = 0;   // column of best score not tracked
    }
}

// Host wrapper: sizes the grid, launches the kernel and waits for it.
// NOTE(review): cudaThreadSetCacheConfig/cudaThreadSynchronize are the
// deprecated spellings of cudaDeviceSetCacheConfig/cudaDeviceSynchronize.
extern "C"
psaError_t localProcessPairwiseReference(sequences_t* references, sequences_t *candidates, sequences_t *queries, alignments_t *alignments)
{
    uint32_t blocks = DIV_CEIL(candidates->num, CUDA_NUM_THREADS);
    uint32_t threads = CUDA_NUM_THREADS;
    uint32_t querySize = queries->h_size[0];
    uint32_t candidateSize = candidates->h_size[0];

    cudaThreadSetCacheConfig(cudaFuncCachePreferL1);

    printf("Grid Size: %d, Block Size: %d, Total alignments: %d, BAND_LEN: %d \n", blocks, threads, candidates->num, BAND_LEN);
    localProcessSWTiling<<<blocks, threads>>>(references->d_HlfRAW, candidates->d_refPosition,
                                              queries->d_HlfRAW, queries->d_HlfRAWposition,
                                              alignments->d_info, alignments->d_results,
                                              querySize, candidateSize, candidates->num);

    cudaThreadSynchronize();

    return (SUCCESS);
}
11,743
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>

static const double kMicro = 1.0e-6;

// Wall-clock time in seconds (microsecond resolution); -1 on failure.
double getTime()
{
    struct timeval TV;
    struct timezone TZ;
    const int RC = gettimeofday(&TV, &TZ);
    if(RC == -1) {
        printf("ERROR: Bad call to gettimeofday\n");
        return(-1);
    }
    return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
}

// Mirror boundary conditions: copy the first/last interior rows and columns
// of the (n+2)x(n+2) padded grid into the ghost cells.
// NOTE(review): parameter `with` (presumably "width") is unused in all four
// kernels below -- confirm whether it was meant to replace the hard-coded n.
__global__ void Mirroring(double *e_prev, int with, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = (row+1)*(n+2)+(column+1);   // +1: skip the ghost-cell border
    if (row == 0 && column<n) e_prev[idx - (n+2)] = e_prev[idx + (n+2)];
    if (row == (n-1) && column<n) e_prev[idx + (n+2)] = e_prev[idx - (n+2)];
    if (column == 0 && row<n) e_prev[idx - 1] = e_prev[idx + 1];
    if (column == (n-1) && row<n) e_prev[idx + 1] = e_prev[idx - 1];
}

// Diffusion step: 5-point Laplacian of e_prev scaled by alpha, written to e.
__global__ void PDE(double *e, double *e_prev, int with, int n, const double alpha)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = (row+1)*(n+2)+(column+1);
    if (column<n && row<n)
        e[idx] = e_prev[idx]+alpha*(e_prev[idx+1] + e_prev[idx-1] - 4*e_prev[idx] + e_prev[idx+(n+2)] + e_prev[idx-(n+2)]);
}

// Recovery-variable update (explicit Euler); reads the freshly updated e.
__global__ void ODE2(double *e, double *r, int with, int n, const double dt, const double epsilon, const double M1, const double M2, const double kk, const double b)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = (row+1)*(n+2)+(column+1);
    if (column<n && row<n)
        r[idx] = r[idx] + dt*(epsilon+M1* r[idx]/( e[idx]+M2))*(-r[idx]-kk* e[idx]*(e[idx]-b-1));
}

// Excitation-variable reaction term (explicit Euler), applied in place to e.
__global__ void ODE1(double *e, double *r, int with, int n, const double dt, const double kk, const double a)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = (row+1)*(n+2)+(column+1);
    if (column<n && row<n)
        e[idx] = e[idx] -dt*(kk* e[idx]*(e[idx] - a)*(e[idx]-1)+ e[idx] *r[idx]);
}

// One full time step: refresh ghost cells, diffuse, then apply the two
// reaction updates. Kernel order matters (ODE1/ODE2 read the PDE result);
// sequencing is guaranteed by the default-stream ordering.
void simulate (double* d_E, double* d_E_prev, double* d_R, const double alpha, const int n, const int m, const double kk, const double dt, const double a, const double epsilon, const double M1,const double M2, const double b)
{
    int s2 = 32;   // 32x32 threads per block
    dim3 DimGrid2(ceil((double)m/s2),ceil((double)n/s2), 1);
    dim3 DimBlock2(s2, s2, 1);
    Mirroring<<<DimGrid2, DimBlock2>>>(d_E_prev, m,n);
    PDE<<<DimGrid2, DimBlock2>>>(d_E, d_E_prev, m, n, alpha);
    ODE1<<<DimGrid2, DimBlock2>>>(d_E, d_R, m, n, dt, kk, a);
    ODE2<<<DimGrid2, DimBlock2>>>(d_E, d_R, m, n, dt, epsilon, M1, M2,kk,b);
}

// Driver: reaction-diffusion simulation on an m x n grid with a one-cell
// ghost border (the constants a, b, kk, M1, M2, epsilon match an
// Aliev-Panfilov-style cardiac model -- presumably; confirm with the source
// paper). Writes the final excitation field to v1.txt.
int main()
{
    int m = 200, n=200;
    double T=1000.0;                       // total simulated time
    const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5;

    double *E, *E_prev, *R;                // host fields
    double *d_E, *d_E_prev, *d_R;          // device fields
    int num_bytes = (m+2)*(n+2)*sizeof(double);   // padded grid

    E = (double*)malloc(num_bytes);
    E_prev = (double*)malloc(num_bytes);
    R = (double*)malloc(num_bytes);
    cudaMalloc((void**)&d_E, num_bytes);
    cudaMalloc((void**)&d_E_prev, num_bytes);
    cudaMalloc((void**)&d_R, num_bytes);

    // initialization: zero interior everywhere
    for (int j=1; j<=m; j++)
        for (int i=1; i<=n; i++)
            E_prev[j*(n+2)+i] = R[j*(n+2)+i] = 0;
    // initialization: excite the right half of the domain
    for (int j=1; j<=m; j++)
        for (int i=n/2+1; i<=n; i++)
            E_prev[j*(n+2)+i] = 1.0;
    // initialization: recovery variable set in the bottom half
    for (int j=m/2+1; j<=m; j++)
        for (int i=1; i<=n; i++)
            R[j*(n+2)+i] = 1.0;

    cudaMemcpy(d_E_prev, E_prev, num_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_R, R, num_bytes, cudaMemcpyHostToDevice);

    double dx = 1.0/n;
    double t = 0.0;
    int cnt=0;
    // stable time step: min of the diffusion (dte) and reaction (dtr) limits
    double rp= kk*(b+1)*(b+1)/4;
    double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk));
    double dtr=1/(epsilon+((M1/M2)*rp));
    double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr;
    double alpha = d*dt/(dx*dx);

    double t0 = getTime();
    while (t<T) {
        t += dt;
        cnt++;
        simulate(d_E, d_E_prev, d_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
        //swap current E with previous E
        double *tmp = d_E;
        d_E = d_E_prev;
        d_E_prev = tmp;
    }

    cudaMemcpy(E, d_E, num_bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(E_prev, d_E_prev, num_bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(R, d_R, num_bytes, cudaMemcpyDeviceToHost);

    double time_elapsed = getTime() - t0;
    printf("Elapsed Time (sec) %g\n",time_elapsed);

    // NOTE(review): after the final swap, the latest field is in d_E_prev,
    // but E (copied from d_E) is what gets written out -- confirm which
    // buffer is intended.
    FILE * fp = fopen("v1.txt","w");
    for (int i=1; i<m+1; i++)
        for (int j=1; j<n+1; j++)
            fprintf(fp,"%f\n", E[i*(n+2)+j]);
    fclose(fp);

    free(E); free(E_prev); free(R);
    cudaFree(d_E); cudaFree(d_E_prev); cudaFree(d_R);
    return 0;
}
11,744
/*
 * Copyright 2014. All rights reserved.
 *
 * CUDA Kernel Device code
 * Rowan Hughes
 */

#define SDATA(index) sdata[index]
#define SMEM(X, Y) sdata[(Y)*bw+(X)]   // NOTE(review): unused in this kernel

// Per-block reduction over per-pixel obstacle data.
// Input g_idata packs [thetaDot1, thetaDot2, ttc, first] per pixel; each
// thread writes `chanels` floats into shared memory (so the dynamic shared
// size must be blockDim.x * chanels * sizeof(float), with chanels >= 5),
// then a tree reduction combines them: min over thetaDot1/ttc/flags, max
// over thetaDot2. Thread 0 writes the block's 5 results to g_odata.
extern "C"
__global__ void copyReductionKernel(float4* g_idata, float* g_odata, int chanels, int sizeData, int offset)
{
    // shared memory
    // the size is determined by the host application
    extern __shared__ float sdata[];

    const int dId = (blockDim.x * blockIdx.x + threadIdx.x);
    // NOTE(review): this early return happens BEFORE the __syncthreads()
    // below; if only part of the last block returns here, the remaining
    // threads wait at a barrier not all threads reach (undefined behavior
    // pre-Volta). The tIdMax guard suggests this tail case is expected --
    // confirm launch sizes always make dId < sizeData uniform per block.
    if(dId >= sizeData) return;

    const unsigned int tIdC = chanels * threadIdx.x;   // this thread's shared-mem base
    const int pixelIdIn = offset + dId;
    const int tIdMax = sizeData - blockDim.x*blockIdx.x;   // valid thread count in this block

    // load data from global to shared memory
    float4 ldata = g_idata[pixelIdIn];  //[thetaDot1, thetaDot2, ttc, first]

    if(ldata.z >= 0 && ldata.x != 0 ) {
        // it is a pixel belonging to an object
        SDATA(tIdC  ) = ldata.x;  // thetaDot1
        SDATA(tIdC+1) = ldata.y;  // thetaDot2
        SDATA(tIdC+2) = ldata.z;  // ttc
        SDATA(tIdC+3) = 1;        // go first if taking thetaDot1
        SDATA(tIdC+4) = 1;        // go first if taking thetaDot2
        if(ldata.z < 3 && ldata.w <= 0)  // ttc < 3s and giving a way
        {
            if(abs(ldata.x) < abs(ldata.y)) {
                SDATA(tIdC+3) = -1;  // go second if taking thetaDot1 => slow down
            } else {
                SDATA(tIdC+4) = -1;  // go second if taking thetaDot2 => slow down
            }
        }
    } else {
        // it is a background pixel: only the ttc slot is marked invalid
        SDATA(tIdC+2) = -1;
    }
    __syncthreads();

    // perform reduction
    // NOTE(review): the loop starts at blockDim.x*0.5 via a float product
    // truncated to unsigned -- equivalent to blockDim.x/2 for even sizes,
    // but an integer shift would be clearer and exact.
    for (unsigned int i=blockDim.x*0.5; i>0; i>>=1)
    {
        if(threadIdx.x < i && (threadIdx.x + i < tIdMax))
        {
            int ic = chanels*i+tIdC;   // partner thread's shared-mem base
            if(SDATA(ic+2) >= 0)       // if ttc2 >= 0 (partner holds object data)
            {
                if(SDATA(tIdC+2) >= 0) // if ttc1 >= 0 (we hold object data too)
                {
                    SDATA(tIdC  ) = min(SDATA(tIdC  ), SDATA(ic  ));
                    SDATA(tIdC+1) = max(SDATA(tIdC+1), SDATA(ic+1));
                    SDATA(tIdC+2) = min(SDATA(tIdC+2), SDATA(ic+2));
                    SDATA(tIdC+3) = min(SDATA(tIdC+3), SDATA(ic+3));
                    SDATA(tIdC+4) = min(SDATA(tIdC+4), SDATA(ic+4));
                }
                else
                {
                    // we are background: adopt the partner's values wholesale
                    SDATA(tIdC  ) = SDATA(ic);
                    SDATA(tIdC+1) = SDATA(ic+1);
                    SDATA(tIdC+2) = SDATA(ic+2);
                    SDATA(tIdC+3) = SDATA(ic+3);
                    SDATA(tIdC+4) = SDATA(ic+4);
                }
            }
        }
        __syncthreads();
    }

    // write data to global memory
    if(threadIdx.x==0)
    {
        int bc = chanels*blockIdx.x;   // one chanels-wide slot per block
        g_odata[bc]   = SDATA(0);
        g_odata[bc+1] = SDATA(1);
        g_odata[bc+2] = SDATA(2);
        g_odata[bc+3] = SDATA(3);
        g_odata[bc+4] = SDATA(4);
    }
}
11,745
#include <iostream>
#include <string>

/*
__global__ void getMeSize(int *result)
{
    result[0]=sizeof(void*);
}

__global__ void t(int *d){
    atomicAdd(d, 1);
}
*/

// Scratch program for probing the CUDA compilation environment.
// All experiments (kernels above, macro checks below) are kept commented
// out; as written the program simply exits successfully.
int main()
{
    /*
    // Report which compiler-identification macros are defined.
    #ifdef __CUDACC__
    std::cout << "__CUDACC__" << "\n";
    #else
    std::cout << "No __CUDACC__" << "\n";
    #endif

    #ifdef __NVCC__
    std::cout << "__NVCC__" << "\n";
    #else
    std::cout << "No __NVCC__" << "\n";
    #endif

    #ifdef __CUDA_ARCH__
    std::cout << "__CUDA_ARCH__" << "\n";
    #else
    std::cout << "No __CUDA_ARCH__" << "\n";
    #endif
    */
    return 0;
}
11,746
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>
#include <cuda.h>
#include <string.h>
#include <math.h>

// consider making wfs into a float4 array, with the wfs from 4 els stacked side-by-side -> less loop iters

// command to check gpu device info
/*** ./usr/local/cuda-9.2/extras/demo_suite/deviceQuery ***/
// compile command
/*** nvcc -Xptxas=-O3,-v -use_fast_math -maxrregcount=20 fieldMaker_paramTester.cu ***/

// physical constants
#define CW 1.480  // water sound speed (mm/us)

// receive system specifics
#define RECLEN 8191     // length of acquired signal
#define NELS 160        // number of receiver elements
#define ADC_CLOCK 20    // digitizer clock (MHz)

// prescribed constant
#define DT 111.7  // time-of-flight = ( element-to-origin (100 us) + transmit-system-specific delays (10.5 us))

// derived constants to take some load off gpu
#define DT_TIMES_ADC_CLOCK ( DT * ADC_CLOCK )
#define ADC_CLOCK_OVER_CW ( ADC_CLOCK / CW )
//~ const float ADC_CLOCK_OVER_CW = ADC_CLOCK/CW;
//~ const float DT_TIMES_ADC_CLOCK = DT*ADC_CLOCK;

// center point of the pressure field being calculated
#define FIELD_X0 0.0
#define FIELD_Y0 0.0
#define FIELD_Z0 0.0
// size of the pressure field to calculate ( units = mm )
#define FIELD_DIM_X 20.0
#define FIELD_DIM_Y 20.0
#define FIELD_DIM_Z 20.0

// constants for gpu
#define BLOCK_DIM_X 8
#define BLOCK_DIM_Y 16
#define BLOCK_DIM_Z 16
// max threads = 1024 (GPU specific, this is for laptop) [8*8*16=1024]
#define THREAD_DIM_X 16
#define THREAD_DIM_Y 8
#define THREAD_DIM_Z 8  //works best if THREAD_DIM_Z < THREAD_DIM_X/Y
#define BLOCK_SIZE (THREAD_DIM_X*THREAD_DIM_Y*THREAD_DIM_Z)
// constant to calculate indices of calculation grid
#define IE (BLOCK_DIM_X*THREAD_DIM_X)
#define JE (BLOCK_DIM_Y*THREAD_DIM_Y)
#define KE (BLOCK_DIM_Z*THREAD_DIM_Z)

// variable to hold coordinate locations of array elements
__constant__ float4 arxyz[NELS];

// projects the measured signals from the array elements back into the field, sums them together
// (delay-and-sum beamforming: for each field point, look up the sample whose
// time-of-flight matches the element-to-point distance and accumulate it)
__global__ void getSignalIntensityField(float *si_field, float4 *wfs, float4 *XYZ){
    int x = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
    int y = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
    int z = __mul24(blockIdx.z,blockDim.z) + threadIdx.z;
    int sigint_idx = (z*JE + y)*IE + x;  // index of field location being calculated
    int tidx;                            // index of 'wfs' vector
    float dx,dy,dz;                      // unit distances from array element to location in calculation field
    float4 xyz = XYZ[sigint_idx];        // makes calculation faster to load value stored in global mem into local mem
    #pragma unroll 20
    for(int eln = 0; eln<NELS; eln++){
        dx = __fsub_rn(xyz.x,arxyz[eln].x);
        dy = __fsub_rn(xyz.y,arxyz[eln].y);
        dz = __fsub_rn(xyz.z,arxyz[eln].z);
        tidx = __float2int_rn( __fadd_rn( __fmul_rn( __fsqrt_rn( __fadd_rn( __fadd_rn( __fmul_rn(dx,dx) , __fmul_rn(dy,dy) ) , __fmul_rn(dz,dz)) ) , ADC_CLOCK_OVER_CW), DT_TIMES_ADC_CLOCK)) + __mul24(eln,RECLEN);
        /****** tidx notes ******
         * *** How it's calculated ***
         * time: t = (dx**2 + dy**2 + dz**2)**0.5/CW
         * array index value corresponding to time 't': idx = (t + [experiment intrinsic time offset] )*ADC_CLOCK
         * experiment intrinsic time offset: DT = ~111.7 us
         *
         * __mul24(eln,RECLEN) -> the signals from all 160 elements are in a single 1D vector
         * this offsets the index appropriately to read signal values from element 'eln'
         * *** equivalent expression using standard math functions (instead of cuda intrinsics) ***
         * tidx = sqrtf( dx*dx + dy*dy + dz*dz )*ADC_CLOCK/CW + DT * ADC_CLOCK + eln*RECLEN;
         ************************/
        // this script loads 4 waveforms at a time, this adds the signal values from all of them together at once
        si_field[sigint_idx] += __fadd_rn(__fadd_rn(wfs[ tidx ].x, wfs[ tidx ].y),__fadd_rn(wfs[ tidx ].z, wfs[ tidx ].w));
    }
}

// function to set the field to 0
__global__ void resetSignalIntensityField(float *pfield){
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int z = blockIdx.z*blockDim.z + threadIdx.z;
    int sigint_idx = z*IE*JE + y*IE + x;
    pfield[sigint_idx] = 0;
}

// loads the signals from file
// NOTE(review): fread's size argument is sizeof(wftmp) == sizeof(float*)
// (8 bytes), not sizeof(float); combined with the element count this does
// not read the intended number of bytes and the return value is unchecked.
// Likely should be fread(wftmp, sizeof(float), RECLEN*NELS*4, fname).
void loadWF(float4 *wf){
    float *wftmp;
    wftmp = (float *)malloc(RECLEN*NELS*4*sizeof(float));
    FILE *fname;
    fname = fopen("hiampwf","rb");
    fread(wftmp,sizeof(wftmp),RECLEN*NELS*4,fname);
    fclose(fname);
    // interleave the 4 acquisitions into the float4 components
    // (index 0 is skipped here and zeroed in the loop below)
    for(int rl=1;rl<(NELS*RECLEN);rl++){
        wf[rl].x=wftmp[rl];
        wf[rl].y=wftmp[rl+NELS*RECLEN];
        wf[rl].z=wftmp[rl+(NELS*RECLEN)*2];
        wf[rl].w=wftmp[rl+3*NELS*RECLEN];
    }
    // zero the first sample of each element's record
    for(int n=0;n<NELS;n++){
        wf[n*RECLEN].x=0;
        wf[n*RECLEN].y=0;
        wf[n*RECLEN].z=0;
        wf[n*RECLEN].w=0;
    }
    free(wftmp);
}

// loads the coordinates of the receiver elements of the array
// NOTE(review): same fread issue as loadWF (sizeof(pointer) as size), and
// the requested count RECLEN*NELS far exceeds the 256*3-float buffer --
// as written this risks overrunning array_tmp; presumably the intent was
// fread(array_tmp, sizeof(float), 256*3, fname). Verify against the data file.
void loadArray(){
    float *array_tmp;
    array_tmp = (float *)malloc(256*3*sizeof(float));
    float4 arxyz_h[NELS];
    FILE *fname;
    fname = fopen("arrayCoords_bin","rb");
    fread(array_tmp,sizeof(array_tmp),RECLEN*NELS,fname);
    fclose(fname);
    // select 160 of the 256 stored element slots: every 4th of the first
    // 128, plus all of the last 128 (32 + 128 = NELS)
    int cntr1, cntr2;
    cntr1 = 0;
    for(cntr2=0;cntr2<256;cntr2++){
        if( (cntr2<128) && (cntr2%4 == 0) ){
            arxyz_h[cntr1].x = array_tmp[cntr2];
            arxyz_h[cntr1].y = array_tmp[cntr2+256];
            arxyz_h[cntr1].z = array_tmp[cntr2+512];
            cntr1++;
        } else if (cntr2>=128) {
            arxyz_h[cntr1].x = array_tmp[cntr2];
            arxyz_h[cntr1].y = array_tmp[cntr2+256];
            arxyz_h[cntr1].z = array_tmp[cntr2+512];
            cntr1++;
        }
    }
    cudaMemcpyToSymbol(arxyz,arxyz_h,NELS*sizeof(float4));  // upload to __constant__ memory
    free(array_tmp);
}

// generates a 4-vector storing the locations of all points in the field we are trying to calculate
// the 4 field of the vector is unused, but in my testing it looked like it was faster to use float4 than float3 and I wasn't running up against any memory limits so I stuck with it
// NOTE(review): the innermost loop runs to `ysteps` but indexes/steps along
// x (cntr3*dx). With the current configuration xsteps == ysteps == zsteps so
// it is benign, but the bound presumably should be xsteps.
void setFieldLocsXYZ(float4 *xyz){
    int cntr1,cntr2,cntr3;
    int xsteps,ysteps,zsteps;
    xsteps = (BLOCK_DIM_X*THREAD_DIM_X);
    ysteps = (BLOCK_DIM_Y*THREAD_DIM_Y);
    zsteps = (BLOCK_DIM_Z*THREAD_DIM_Z);
    float x0,y0,z0;
    x0 = FIELD_X0 - FIELD_DIM_X/2.0;
    y0 = FIELD_Y0 - FIELD_DIM_Y/2.0;
    z0 = FIELD_Z0 - FIELD_DIM_Z/2.0;
    float dx,dy,dz;
    dx = FIELD_DIM_X/xsteps;
    dy = FIELD_DIM_Y/ysteps;
    dz = FIELD_DIM_Z/zsteps;
    for(cntr1=0;cntr1<xsteps;cntr1++){
        for(cntr2=0;cntr2<ysteps;cntr2++){
            for(cntr3=0;cntr3<ysteps;cntr3++){
                xyz[(cntr1*ysteps+cntr2)*zsteps+cntr3].x = x0+cntr3*dx;
                xyz[(cntr1*ysteps+cntr2)*zsteps+cntr3].y = y0+cntr2*dy;
                xyz[(cntr1*ysteps+cntr2)*zsteps+cntr3].z = z0+cntr1*dz;
            }
        }
    }
}

int main(){
    // timer variables
    struct timeval t0,t1;

    // load waveforms
    float4 *wf, *wf_d;
    wf = (float4 *)malloc( RECLEN*NELS*sizeof(float4) );
    loadWF(wf);
    cudaMalloc( &wf_d, RECLEN*NELS*sizeof(float4) );
    cudaMemcpy( wf_d, wf, RECLEN*NELS*sizeof(float4), cudaMemcpyHostToDevice );  // copy to gpu

    // load array coords to __constant__ memory on GPU
    loadArray();

    // allocate memory for signal intensity field calcs, initialize to 0, and put it on the GPU
    float *sig_field_host, *sig_field_pinned, *sig_field_d;
    int nmemb = (BLOCK_DIM_X*THREAD_DIM_X)*(BLOCK_DIM_Y*THREAD_DIM_Y)*(BLOCK_DIM_Z*THREAD_DIM_Z);
    unsigned int sig_field_size = nmemb*sizeof(float);
    sig_field_host = (float *)malloc(sig_field_size);
    cudaMallocHost((void **)&sig_field_pinned,sig_field_size);  // pinned for fast D2H copy
    cudaMalloc((void **)&sig_field_d,sig_field_size);
    memset(sig_field_host,0,sig_field_size);  // initialize sig_field to 0
    memcpy(sig_field_pinned,sig_field_host,sig_field_size);  // copy to pinned memory
    cudaMemcpy(sig_field_d,sig_field_host,sig_field_size,cudaMemcpyHostToDevice);  // copy to gpu

    // allocate memory for the spatial coordinates of the calculation grid, populate it, and put it on the GPU
    float4 *xyz_h, *xyz_d;
    int nmembxyz = (BLOCK_DIM_X*THREAD_DIM_X)*(BLOCK_DIM_Y*THREAD_DIM_Y)*(BLOCK_DIM_Z*THREAD_DIM_Z);
    unsigned int xyz_size = nmembxyz*sizeof(float4);
    xyz_h = (float4 *)malloc(xyz_size);
    cudaMalloc((void **)&xyz_d,xyz_size);
    setFieldLocsXYZ(xyz_h);
    cudaMemcpy(xyz_d,xyz_h,xyz_size,cudaMemcpyHostToDevice);  // copy to gpu

    // setup cuda blocks/threads
    dim3 num_blocks(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
    dim3 num_threads(THREAD_DIM_X, THREAD_DIM_Y, THREAD_DIM_Z);

    // calculate the signal intenisty in the field
    // NOTE(review): the elapsed-time print below subtracts tv_usec only, so
    // it is wrong whenever the run crosses a second boundary.
    gettimeofday(&t0,NULL);
    resetSignalIntensityField <<<num_blocks, num_threads>>> ( sig_field_d );
    getSignalIntensityField <<<num_blocks, num_threads>>> ( sig_field_d , wf_d, xyz_d );
    cudaMemcpy(sig_field_pinned,sig_field_d,sig_field_size,cudaMemcpyDeviceToHost);
    gettimeofday(&t1,NULL);
    printf("calc time = %d us\n",t1.tv_usec-t0.tv_usec);

    // write the results to file
    // NOTE(review): fwrite's size and nmemb look swapped -- as written it
    // requests nmemb records of sig_field_size bytes each (reads far past
    // the buffer). Presumably fwrite(sig_field_pinned, sizeof(float), nmemb, fname).
    FILE *fname;
    fname = fopen("sig_field_bin","wb");
    fwrite(sig_field_pinned,sig_field_size,nmemb,fname);
    fclose(fname);

    // free allocated memory
    cudaFree(wf_d);
    free(wf);
    cudaFree(sig_field_d);
    cudaFreeHost(sig_field_pinned);
    free(sig_field_host);
    cudaFree(xyz_d);
    free(xyz_h);
    return 0;
}
11,747
/*
 * Copyright 2018 Foundation for Research and Technology - Hellas
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0 [1]
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Links:
 * ------
 *  [1] http://www.apache.org/licenses/LICENSE-2.0 [1]
 */
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <cstring>   // fix: strstr was used without declaring it

using namespace ::std;

/*
 * Writes a short accelerator inventory to new.txt: CPU model/core info
 * parsed from /proc/cpuinfo, followed by the name and SM count of every
 * CUDA device present.
 *
 * Fixes: added the missing <cstring> include for strstr, and new.txt is
 * now explicitly closed before exit.
 */
int main() {
    ifstream fin;
    fin.open("/proc/cpuinfo", ios::in);
    ofstream fout;
    fout.open("new.txt", ios::out);

    char ch;
    char *model_name, *cpu_cores;
    char line[75];

    // get(ch) consumes one character (first char of a line, or the pending
    // '\n'), then get(line, ...) reads up to the next newline.
    while (fin.get(ch)) {
        fin.get(line, 75, '\n');
        model_name = strstr(line, "model name");
        cpu_cores = strstr(line, "cpu cores");
        if (model_name != NULL) {
            fout << "Accelerator type is CPU \n" << model_name << endl;
        } else if (cpu_cores != NULL) {
            fout << cpu_cores << endl << "--------------------" << endl;
        }
    }

    // Number of CUDA devices
    int devCount;
    cudaGetDeviceCount(&devCount);
    if (devCount > 0) {
        fout << "Accelerator type is NVIDIA GPU" << endl;
        fout << "Number of NVIDIA GPUs: " << devCount << endl;
        // Iterate through devices
        for (int i = 0; i < devCount; ++i) {
            cudaDeviceProp devProp;
            cudaGetDeviceProperties(&devProp, i);
            fout << "model name " << devProp.name << endl;
            fout << "Multi-Processors " << devProp.multiProcessorCount << endl;
            cudaSetDevice(i);
        }
    } else {
        cout << "There is no CUDA device" << endl;
    }

    fin.close();
    fout.close();   // fix: output stream was never closed explicitly
    return 0;
}
11,748
/*
 * Copyright 2016 Alexander Terenin
 *
 * Licensed under the Apache License, Version 2.0 (the "License")
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Function : cuda_lambdaSqInv
 * Purpose  : draws Exp(1 + theta^2) random variables via inverse-CDF:
 *            given a uniform draw u[i], replaces it with
 *            -log(u[i]) / (nuInv[i] + 0.5 * tSqInv[0] * beta[i]^2)
 * Argument n      : size of sampler (one thread per element, guarded)
 * Argument *u     : pointer to array of uniforms (overwritten in place)
 * Argument *beta  : pointer to beta vector
 * Argument *nuInv : pointer to nuInv vector
 * Argument *tSqInv: pointer to tauSqInv constant (single-element array)
 * Output   : mutates u and stores result in its place
 */
extern "C"
__global__ void cuda_lambdaSqInv(int n, float *u, float* beta, float *nuInv, float* tSqInv)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;  // guard the grid tail

    const float rate = nuInv[idx] + 0.5f * tSqInv[0] * beta[idx] * beta[idx];
    u[idx] = (-1.0f / rate) * logf(u[idx]);
}
11,749
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<cuda_runtime.h>

// C = A * B for square WIDTH x WIDTH integer matrices; one thread per output
// element. Fix: bounds guard so partial blocks at the grid edge cannot read
// or write out of range.
__global__ void matmul(int *a, int *b, int *c, int WIDTH)
{
    int rowa = blockIdx.y * blockDim.y + threadIdx.y;
    int colb = blockIdx.x * blockDim.x + threadIdx.x;
    if (rowa < WIDTH && colb < WIDTH) {
        int sum = 0;
        for (int i = 0; i < WIDTH; i++)
            sum += a[rowa * WIDTH + i] * b[i * WIDTH + colb];
        c[rowa * WIDTH + colb] = sum;
    }
}

// Reads two WIDTH x WIDTH matrices from stdin, multiplies them on the GPU,
// and prints the product.
int main()
{
    int WIDTH, BLOCK_WIDTH;
    int *matA, *matB, *matSum;
    int *da, *db, *dc;

    printf("enter width of matrix\n");
    scanf("%d", &WIDTH);

    // Fixes over the original launch configuration:
    //  - BLOCK_WIDTH clamped to [1, 32]: WIDTH/2 would exceed the 1024
    //    threads-per-block limit for WIDTH > 64, and be 0 for WIDTH == 1;
    //  - grid size rounded up so odd WIDTH values are fully covered
    //    (the kernel's bounds guard handles the overhang).
    BLOCK_WIDTH = WIDTH / 2;
    if (BLOCK_WIDTH < 1) BLOCK_WIDTH = 1;
    if (BLOCK_WIDTH > 32) BLOCK_WIDTH = 32;

    printf("Enter elements of matrix A\n");
    matA = (int*)malloc(sizeof(int) * WIDTH * WIDTH);
    for (int i = 0; i < WIDTH * WIDTH; i++) {
        scanf("%d", &matA[i]);
    }

    printf("Enter elements of matrix B\n");
    matB = (int*)malloc(sizeof(int) * WIDTH * WIDTH);
    for (int i = 0; i < WIDTH * WIDTH; i++) {
        scanf("%d", &matB[i]);
    }

    matSum = (int*)malloc(sizeof(int) * WIDTH * WIDTH);

    cudaMalloc((void**)&da, sizeof(int) * WIDTH * WIDTH);
    cudaMalloc((void**)&db, sizeof(int) * WIDTH * WIDTH);
    cudaMalloc((void**)&dc, sizeof(int) * WIDTH * WIDTH);

    cudaMemcpy(da, matA, sizeof(int) * WIDTH * WIDTH, cudaMemcpyHostToDevice);
    cudaMemcpy(db, matB, sizeof(int) * WIDTH * WIDTH, cudaMemcpyHostToDevice);

    // Ceil-division so the grid covers the whole matrix.
    int NumBlocks = (WIDTH + BLOCK_WIDTH - 1) / BLOCK_WIDTH;
    dim3 grid_conf(NumBlocks, NumBlocks);
    dim3 block_conf(BLOCK_WIDTH, BLOCK_WIDTH);
    matmul<<<grid_conf, block_conf>>>(da, db, dc, WIDTH);

    // Blocking copy also synchronizes with the kernel before reading results.
    cudaMemcpy(matSum, dc, sizeof(int) * WIDTH * WIDTH, cudaMemcpyDeviceToHost);

    int n = WIDTH;
    int m = WIDTH;
    printf("Result: \n");
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            printf("%d ", matSum[i * n + j]);
        }
        printf("\n");
    }

    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    free(matA);
    free(matB);
    free(matSum);
    return 0;
}
11,750
#include <stdio.h>
#include <stdlib.h>

#define N 32 // number of values to sum
#define I 32 // interval: how many values each thread accumulates per pass

// Device code: parallel sum of the N-element array `a`, result stored in *res.
// Phase 1: each of the N/I threads sums its own I-element slice of `a` into
// shared memory. Phase 2: the while-loop repeatedly folds groups of I partial
// sums until `resto` entries remain, which thread 0 adds into *res.
// NOTE(review): there is no __syncthreads() inside the while loop, so the
// folding phase is only race-free when at most one thread participates — true
// for the configured N == I, but confirm before changing N or I. Also, if
// I > N the loop never runs and `resto` is read uninitialized.
__global__ void soma_numeros(int *a, int *res)
{
    __shared__ int temp[N/I];
    int ind = threadIdx.x;

    // Phase 1: per-thread partial sum over one I-wide slice.
    if (ind < N/I) {
        int soma = 0;
        for (int i = I*ind; i < I*ind + I; i++)
            soma += a[i];
        temp[ind] = soma;
    }
    __syncthreads();

    // Phase 2: fold partial sums in groups of I.
    int resto;
    int controle = I;
    int somaThreads = 0;
    while (controle <= N) {
        if (ind < N/controle/I) {
            for (int i = I*ind; i < I*ind+I; i++)
                somaThreads += temp[i];
            temp[ind] = somaThreads;
            somaThreads = 0;
        }
        // Number of partial sums remaining after this pass (floor at 1).
        resto = N/controle;
        if (resto == 0)
            resto = 1;
        controle *= I;
    }
    __syncthreads();

    // Thread 0 accumulates the surviving partials into the output.
    if (ind == 0) {
        printf("%d", resto);
        *res = 0;
        for (int i = 0; i < resto; i++) {
            *res += temp[i];
        }
    }
}

// Host code
int main()
{
    int a[N];
    int r;
    int* dev_a;
    int* dev_r;

    // Initialize the host array with 0..N-1 (expected sum: N*(N-1)/2).
    for (int i = 0; i < N; i++)
        a[i] = i;

    // Allocate space for the GPU variables.
    cudaMalloc((void**)&dev_a, N*sizeof(int));
    cudaMalloc((void**)&dev_r, sizeof(int));

    // Copy the input from CPU to GPU.
    cudaMemcpy(dev_a, &a, N*sizeof(int), cudaMemcpyHostToDevice);

    // Kernel launch: one block of N/I threads. dim3 would allow specifying
    // block/thread counts per dimension; omitted dimensions default to 1.
    soma_numeros<<<1, N/I>>>(dev_a, dev_r);

    // Copy the result from GPU to CPU (blocking copy, also synchronizes).
    cudaMemcpy(&r, dev_r, sizeof(int), cudaMemcpyDeviceToHost);

    // Show the result.
    printf("R = %d\n", r);

    // Free GPU memory.
    cudaFree(dev_a);
    cudaFree(dev_r);

    return 0;
}
11,751
#include <cuda.h>
#include <cufft.h>
#include <cuda_profiler_api.h>
#include <stdio.h>

// Load through the read-only data cache on architectures that support it
// (SM 3.5+); plain dereference otherwise.
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
    return __ldg(ptr);
#else
    return *ptr;
#endif
}

// Expands a real nx*ny*nz volume into a complex one: real parts are copied,
// imaginary parts are zeroed. One thread handles one (x, y) column and walks
// the z axis. Indexing here is (x*ny + y)*nz + z.
extern "C" __global__ void data_transfer_real_cplx(
    int nx, int ny, int nz, float *in, cufftComplex *out)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= nx || iy >= ny) return;

    int base = (ix * ny + iy) * nz;
    for (std::size_t iz = 0; iz < nz; iz++) {
        out[base + iz].x = ldg(&in[base + iz]);
        out[base + iz].y = 0;
    }
}

// Collapses a complex volume back to its real parts. One thread handles one
// (x, y) position and steps through the z planes with stride nx*ny.
// NOTE(review): the layout here (ky*nx + kx, plane-major) differs from the
// real->complex kernel above — presumably matching cuFFT's data ordering
// after the transform; confirm against the callers.
extern "C" __global__ void data_transfer_cplx_real(
    int nx, int ny, int nz, cufftComplex *in, float *out)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= nx || iy >= ny) return;

    int idx = nx * iy + ix;
    for (int plane = 0; plane < nz; plane++, idx += nx * ny) {
        out[idx] = ldg(&in[idx]).x;
    }
}
11,752
// Applies one stochastic-gradient-descent step to `parameter` and clears the
// accumulated `gradient`, element-wise:
//   parameter[i] -= scalingFactor * learningRate * gradient[i]
//   gradient[i]   = 0
// length: number of elements; extra threads past `length` do nothing.
// Fixes: removed the trailing __syncthreads() (the kernel is purely
// element-wise with no shared-memory communication, so the barrier did
// nothing useful) and replaced the `0.0` double literal with `0.0f`.
extern "C" __global__ void stochasticGradientDescentKernel (int length, float *parameter, float scalingFactor, float learningRate, float *gradient)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;

    if (index < length) {
        parameter[index] = parameter[index] - scalingFactor * learningRate * gradient[index];
        // Reset so the next accumulation pass starts from zero.
        gradient[index] = 0.0f;
    }
}
11,753
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h> // uint64_t — was missing; previously pulled in transitively
#include <cuda_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <algorithm>

// Sorts h_key_array on the GPU in independent batches of batch_size keys:
// each batch is copied to the device, sorted with thrust::sort, and copied
// back in place. Batches are NOT merged — the result is batch-wise sorted.
// Fixes: the device buffer allocated here used to leak (no cudaFree), and
// the tail of number_of_elements % batch_size keys was silently dropped
// from sorting; it is now sorted as a final short batch.
// NOTE: d_key_array is passed by value and immediately re-pointed at a fresh
// device allocation — the caller's argument is never used or updated.
void ThrustSort(uint64_t *h_key_array, uint64_t *d_key_array,
                uint64_t number_of_elements, uint64_t batch_size)
{
    uint64_t number_of_batches = number_of_elements / batch_size;
    uint64_t remainder = number_of_elements % batch_size;

    cudaMalloc((void**)&d_key_array, batch_size * sizeof(uint64_t));
    thrust::device_ptr<uint64_t> th_key_array(d_key_array);

    for (uint64_t i = 0; i < number_of_batches; i++) {
        cudaMemcpy(d_key_array, &h_key_array[i * batch_size],
                   batch_size * sizeof(uint64_t), cudaMemcpyHostToDevice);
        // thrust::sort runs on the device for device_ptr ranges; the
        // following blocking cudaMemcpy synchronizes before readback.
        thrust::sort(th_key_array, th_key_array + batch_size);
        cudaMemcpy(&h_key_array[i * batch_size], d_key_array,
                   batch_size * sizeof(uint64_t), cudaMemcpyDeviceToHost);
    }

    // Sort the leftover partial batch, if any.
    if (remainder > 0) {
        uint64_t offset = number_of_batches * batch_size;
        cudaMemcpy(d_key_array, &h_key_array[offset],
                   remainder * sizeof(uint64_t), cudaMemcpyHostToDevice);
        thrust::sort(th_key_array, th_key_array + remainder);
        cudaMemcpy(&h_key_array[offset], d_key_array,
                   remainder * sizeof(uint64_t), cudaMemcpyDeviceToHost);
    }

    cudaFree(d_key_array); // fix: previously leaked every call

    return;
}
11,754
// One Lax-scheme time step for the 1D advection equation on a periodic grid:
//   f_next[i] = 0.5*(f[i+1] + f[i-1]) - (u*dt / (2*dx)) * (f[i+1] - f[i-1])
// with wrap-around neighbors at both ends. One thread per grid point.
__global__ void wave1Dlax(double * f_next, double * f, double u, double dt, double dx, int N)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N) return;

    // Periodic neighbor indices.
    int right = (i + 1 == N) ? 0 : i + 1;
    int left  = (i == 0) ? N - 1 : i - 1;

    double fr = f[right];
    double fl = f[left];
    f_next[i] = 0.5*(fr + fl) - (u*dt/(2.*dx))*(fr - fl);
}
11,755
extern "C" {
// Thin __device__ wrappers over the CUDA built-in index variables. Kept
// verbatim: this is machine-generated code and other generated callers may
// reference these names.
__device__ inline int threadIdx_x() { return threadIdx.x; }
__device__ inline int threadIdx_y() { return threadIdx.y; }
__device__ inline int threadIdx_z() { return threadIdx.z; }
__device__ inline int blockIdx_x() { return blockIdx.x; }
__device__ inline int blockIdx_y() { return blockIdx.y; }
__device__ inline int blockIdx_z() { return blockIdx.z; }
__device__ inline int blockDim_x() { return blockDim.x; }
__device__ inline int blockDim_y() { return blockDim.y; }
__device__ inline int blockDim_z() { return blockDim.z; }
__device__ inline int gridDim_x() { return gridDim.x; }
__device__ inline int gridDim_y() { return gridDim.y; }
__device__ inline int gridDim_z() { return gridDim.z; }

__global__ void lambda_14349(float*, float*);
__global__ void lambda_14462(float*, float*);

// Element-wise copy over a 2D launch with a fixed row stride of 4096:
//   dst[4096*y + x] = src[4096*y + x]
// where (x, y) is the global 2D thread coordinate. Writes the FIRST
// argument, reads the SECOND. Simplified from compiler-generated code that
// carried redundant shadow variables and dead goto labels; the computed
// index and the copy are unchanged.
// NOTE(review): no bounds check — the launch configuration must cover
// exactly the intended 4096-wide region.
__global__ __launch_bounds__ (128 * 1 * 1) void lambda_14349(float* dst, float* src)
{
    int x = threadIdx_x() + blockDim_x() * blockIdx_x();
    int y = threadIdx_y() + blockDim_y() * blockIdx_y();
    int idx = 4096 * y + x;
    dst[idx] = src[idx];
}

// Same element-wise copy with the data flowing the other way: reads the
// FIRST argument and writes the SECOND (note the reversed roles relative to
// lambda_14349 — this matches the original generated code).
__global__ __launch_bounds__ (128 * 1 * 1) void lambda_14462(float* src, float* dst)
{
    int x = threadIdx_x() + blockDim_x() * blockIdx_x();
    int y = threadIdx_y() + blockDim_y() * blockIdx_y();
    int idx = 4096 * y + x;
    dst[idx] = src[idx];
}
}
11,756
#include "includes.h"

// Block-level sum reduction with a manual 4x unroll: each block first folds
// four consecutive blockDim.x-wide input tiles into its first tile in place
// (in global memory), then runs a standard interleaved-pair reduction over
// that tile; the per-block total is written to temp[blockIdx.x].
// NOTE(review): the unroll is skipped for threads where
// index + 3*blockDim.x >= size, yet the reduction loop still sums a full
// blockDim.x window — totals are only correct when size is a multiple of
// blockDim.x * 4. Confirm the launch configuration guarantees this.
__global__ void reduction_unrolling_blocks4(int * input, int * temp, int size)
{
	int tid = threadIdx.x;

	// Each block owns a tile of 4 * blockDim.x consecutive elements.
	int BLOCK_OFFSET = blockIdx.x * blockDim.x * 4;
	int index = BLOCK_OFFSET + tid;

	// Base of this block's tile in the input array.
	int * i_data = input + BLOCK_OFFSET;

	// 4x unroll: collapse the four sub-tiles into the first one.
	if ((index + 3 * blockDim.x) < size)
	{
		int a1 = input[index];
		int a2 = input[index + blockDim.x];
		int a3 = input[index+ 2* blockDim.x];
		int a4 = input[index+ 3 *blockDim.x];
		input[index] = a1 + a2 + a3 + a4;
	}

	__syncthreads();

	// Interleaved-pair reduction over the first blockDim.x elements.
	for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2)
	{
		if (tid < offset)
		{
			i_data[tid] += i_data[tid + offset];
		}
		__syncthreads();
	}

	// Thread 0 publishes this block's partial sum.
	if (tid == 0)
	{
		temp[blockIdx.x] = i_data[0];
	}
}
11,757
//#undef NDEBUG // //#include <iostream> //#include <cassert> // //#include <inttypes.h> // //#include <cuda_runtime.h> //#include <device_launch_parameters.h> //#include <device_functions.h> // //#include <opencv2/core.hpp> //#include <opencv2/core/cuda.hpp> //#include <opencv2/cudaimgproc.hpp> //#include <opencv2/cudafeatures2d.hpp> //#include <opencv2/cudaarithm.hpp> //#include <opencv2/cudawarping.hpp> //#include <opencv2/cudafilters.hpp> //#include <opencv2/imgproc.hpp> //#include <opencv2/imgcodecs.hpp> //#include <opencv2/highgui.hpp> // //#include "cuda_call_checker.cuh" //#include "affine_transformer_gpu.cuh" //#include "fractale_compressor_gpu_rgb.cuh" // ///* // Ŝ4x4CubNTCY2x2̎ // { 0, 1, 2, 3, { 0, 1, 4, 5, // 4, 5, 6, 6, 2, 3, 6, 7, // 8, 9,10,11, 8, 9,12,13, // 12,13,14,14} -> 10,11,14,15} // ƕёւCubNTCY̓J[lĂяoŕύX // fc_arrangement<<<(grid_x, grid_y), (block_x, block_y, n)>>>(i, o, size); // eubN //*/ ////H ////__global__ void fc_arrangement(uint8_t* d_original_img, uint8_t* d_arrangement_img, uint32_t block_size){ //// extern uint8_t sm[]; //// //// //ubN̐ //// uint32_t blocks_num = gridDim.x; //// //ubN̑傫 //// uint32_t block_total = blockDim.x * blockDim.y; //// //// //// //// //ubN̔z̐擪index //// uint32_t dst_block_index = (blockIdx.y * blocks_num + blockIdx.x) * block_total; //// //Xbhindex //// uint32_t dst_thread_index = threadIdx.y * blockDim.y + threadIdx.x; //// //// //eubN̉flꎞIɕێ //// sm[dst_block_index + dst_thread_index]; //// //// __syncthreads(); ////} // ///* // ʓIȉf̕т̔zɕۑꂽ摜c1/2{ɏk // 4_̕ϒlZo邾̕ //*/ //__global__ void fc_resize(uint8_t* d_original_img, // uint8_t* d_resize_img, // uint32_t original_width) //{ // uint32_t xx = threadIdx.x + blockIdx.x * blockDim.x; // uint32_t yy = threadIdx.y + blockIdx.y * blockDim.y; // uint32_t ix = xx << 1; // uint32_t iy = yy << 1; // // uint32_t idx1 = d_original_img[(iy * original_width) + ix]; // uint32_t idx2 = d_original_img[(iy * original_width) + ix + 1]; // uint32_t idx3 = d_original_img[((iy 
+ 1) * original_width) + ix]; // uint32_t idx4 = d_original_img[((iy + 1) * original_width) + ix + 1]; // // d_resize_img[yy * (original_width >> 1)+ xx] = (uint8_t)((idx1 + idx2 + idx3 + idx4) >> 2); //} // ///* //input: // uint8_t* d_orig_img : ʓIȉ摜̌` //output: // uint8_t* d_ranges :ubNꂽ摜̌` // uint8_t* d_domains :ubNꂽ摜̌` 摜TCY1/2 ubNTCY͓ // // tN^kɕKvȉ摜z𐶐 // J[lĂԎ̃ubNŃhCikjCW̃ubN肷 // fc_make_range_n_domain<<<grid, (block_x, block_y)>>> // block_x, block_yubN̑傫ɂȂ //*/ //__global__ void fc_make_domains_n_ranges(uint8_t* d_orig_img, // uint8_t* d_ranges, // uint8_t* d_domains){ // // uint32_t rdblock_id = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y; // uint32_t rdblock_thread_id = blockDim.x * threadIdx.y + threadIdx.x; // uint32_t rdblock_array_id = rdblock_id + rdblock_thread_id; // // uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; // uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; // uint32_t normal_array_id = y * (gridDim.x * blockDim.x) + x; // // d_ranges[rdblock_array_id] = d_orig_img[normal_array_id]; // // if( (blockIdx.x >= (gridDim.x >> 1) ) || ( blockIdx.y >= (gridDim.y >> 1) ) ){ // return; // } // // uint32_t half_id = (blockIdx.y * (gridDim.x >> 1) + blockIdx.x) * blockDim.x * blockDim.y; // // uint32_t idx1 = d_orig_img[2 * y * (gridDim.x * blockDim.x) + 2 * x]; // uint32_t idx2 = d_orig_img[2 * y * (gridDim.x * blockDim.x) + 2 * x + 1]; // uint32_t idx3 = d_orig_img[(2 * y + 1) * (gridDim.x * blockDim.x) + 2 * x]; // uint32_t idx4 = d_orig_img[(2 * y + 1) * (gridDim.x * blockDim.x) + 2 * x + 1]; // // d_domains[half_id + rdblock_thread_id] = (uint8_t)((idx1 + idx2 + idx3 + idx4) >> 2); //} // ///* // ehC̑aCŏlőlvZ //*/ //__global__ void fc_domain_summimmax(uint8_t* d_domains, // uint32_t dblock_count, // uint32_t* dblock_sum, // uint32_t* dblock_min, // uint32_t* dblock_max) //{ // //sum, min, max3ۑ // __shared__ uint32_t domain_summinmax[THREADBLOCK_MAX * 3]; // uint32_t dblock_id = blockIdx.x * blockDim.z + threadIdx.z; 
// uint32_t dblock_thread_id = threadIdx.y * blockDim.x + threadIdx.x; // uint32_t dblock_array_id = dblock_id + dblock_thread_id; // // uint32_t smem_block_id = threadIdx.z; // uint32_t smem_thread_id = dblock_thread_id; // uint32_t smem_array_sum_id = smem_block_id * (blockDim.x * blockDim.y) + smem_thread_id; // uint32_t smem_array_min_id = smem_array_sum_id + THREADBLOCK_MAX; // uint32_t smem_array_max_id = smem_array_min_id + THREADBLOCK_MAX; // // if (smem_array_sum_id == THREADBLOCK_MAX) { // printf("asdasfawdfja@opwjgf@paeo"); // // } // // uint8_t pixel = d_domains[dblock_array_id]; // //sump // domain_summinmax[smem_array_sum_id] = pixel; // //minp // domain_summinmax[smem_array_min_id] = pixel; // //maxp // domain_summinmax[smem_array_max_id] = pixel; // // __syncthreads(); // // for(int32_t i = (blockDim.x * blockDim.y) / 2; i > 0; i >>= 1){ // if(smem_thread_id < i){ // //sum // domain_summinmax[smem_array_sum_id] += domain_summinmax[smem_array_sum_id + i]; // //min // if(domain_summinmax[smem_array_min_id] > domain_summinmax[smem_array_min_id + i]){ // domain_summinmax[smem_array_min_id] = domain_summinmax[smem_array_min_id + i]; // } // //max // if (domain_summinmax[smem_array_max_id] < domain_summinmax[smem_array_max_id + i]) { // domain_summinmax[smem_array_max_id] = domain_summinmax[smem_array_max_id + i]; // } // } // __syncthreads(); // } // // //ۑ // if (dblock_thread_id == 0) { // dblock_sum[dblock_id] = domain_summinmax[smem_array_sum_id]; // dblock_min[dblock_id] = domain_summinmax[smem_array_min_id]; // dblock_max[dblock_id] = domain_summinmax[smem_array_max_id]; // }; //} // ///* // eW̑aCŏlőlvZ // // //*/ //__global__ void fc_range_summimmax(uint8_t* d_ranges, // uint32_t rblock_count, // uint32_t* rblock_sum, // uint32_t* rblock_min, // uint32_t* rblock_max) //{ // //sum, min, max3ۑ // __shared__ uint32_t range_summinmax[THREADBLOCK_MAX * 3]; // uint32_t rblock_id = blockIdx.x * blockDim.z + threadIdx.z; // uint32_t rblock_thread_id = 
threadIdx.y * blockDim.x + threadIdx.x; // uint32_t rblock_array_id = rblock_id + rblock_thread_id; // // uint32_t smem_block_id = threadIdx.z; // uint32_t smem_thread_id = rblock_thread_id; // uint32_t smem_array_sum_id = smem_block_id * (blockDim.x * blockDim.y) + smem_thread_id; // uint32_t smem_array_min_id = smem_array_sum_id + THREADBLOCK_MAX; // uint32_t smem_array_max_id = smem_array_min_id + THREADBLOCK_MAX; // // uint8_t pixel = d_ranges[rblock_array_id]; // //sump // range_summinmax[smem_array_sum_id] = pixel; // //minp // range_summinmax[smem_array_min_id] = pixel; // //maxp // range_summinmax[smem_array_max_id] = pixel; // // __syncthreads(); // // for (int32_t i = (blockDim.x * blockDim.y) / 2; i > 0; i >>= 1) { // if (smem_thread_id < i) { // //sum // range_summinmax[smem_array_sum_id] += range_summinmax[smem_array_sum_id + i]; // //min // if (range_summinmax[smem_array_min_id] > range_summinmax[smem_array_min_id + i]) { // range_summinmax[smem_array_min_id] = range_summinmax[smem_array_min_id + i]; // } // //max // if (range_summinmax[smem_array_max_id] < range_summinmax[smem_array_max_id + i]) { // range_summinmax[smem_array_max_id] = range_summinmax[smem_array_max_id + i]; // } // } // __syncthreads(); // } // // //ۑ // if (rblock_thread_id == 0) { // //if(rblock_id == 0){ // // printf("sum : %" PRIu32 "\n", range_summinmax[smem_array_sum_id]); // // printf("min : %" PRIu32 "\n", range_summinmax[smem_array_min_id]); // // printf("max : %" PRIu32 "\n", range_summinmax[smem_array_max_id]); // //} // rblock_sum[rblock_id] = range_summinmax[smem_array_sum_id]; // rblock_min[rblock_id] = range_summinmax[smem_array_min_id]; // rblock_max[rblock_id] = range_summinmax[smem_array_max_id]; // }; //} // ///* //input: // uint32_t* d_dblock_sum :ehCubN̑a // uint32_t* d_dblock_min :ehCubN̍ŏl // uint32_t* d_dblock_max :ehCubN̍ől // uint32_t* d_rblock_sum :eWubN̑a // uint32_t* d_rblock_min :eWubN̍ŏl // uint32_t* d_rblock_max :eWubN̍ől // uint32_t 
dr_block_pixel_total :ubN̉f //output: // double* d_contrast_scaling :ehCubN̊eWubNɑ΂œKXP[O // uint32_t* d_brightness_shift :ehCubN̊eWubNɑ΂œKPxVtg // //call: // dim3 fc3block(THREADBLOCK_MAX); // dim3 fc3grid(dblock_count, rblock_count / THREADBLOCK_MAX); // fc_calc_scale_n_shift<<<fc3grid, fc3block>>> // //‚̃XbhubNŕ̃WubN̍ŏlCőlvZ //*/ //__global__ void fc_calc_scale_n_shift(uint32_t* d_dblock_sum, // uint32_t* d_dblock_min, // uint32_t* d_dblock_max, // uint32_t* d_rblock_sum, // uint32_t* d_rblock_min, // uint32_t* d_rblock_max, // uint32_t dr_block_pixel_total, // double* d_contrast_scaling, // uint32_t* d_brightness_shift) //{ // uint32_t dblock_id = blockIdx.x; // uint32_t rblock_id = blockIdx.y * blockDim.x + threadIdx.x; // uint32_t array_id = blockIdx.x * (gridDim.y * blockDim.x) + rblock_id; // // //PxVtgvZ // int32_t shift = ((int32_t)d_dblock_sum[dblock_id] - (int32_t)d_rblock_sum[rblock_id]) / dr_block_pixel_total; // d_brightness_shift[array_id] = shift; // //RgXgXP[O // double d = (double)(d_dblock_max[dblock_id] - d_dblock_min[dblock_id]); // double r = (double)(d_rblock_max[rblock_id] - d_rblock_min[rblock_id]); // // double raw_scaling = r / d; // d_contrast_scaling[array_id] = raw_scaling; // // /* // TODO {I4bitɏkKvLׁC // XP[Ȍ͈kĕێKv // */ // // //double min; // //double max; // //uint32_t scaling; // //for (min = -0.03125, max = 0.03125, scaling = 0; scaling < 16; min += 0.0625, max += 0.0625, scaling++) { // // if (min < raw_scaling && raw_scaling < max){ // // d_brightness_shift[array_id] = scaling; // // return; // // } // //} // ////0.9625ȏ͑S15ɁEEEH // //d_brightness_shift[array_id] = 0xF; //} // ///* // dim3 fc4block(dr_blocksize, dr_blocksize, THREADBLOCK_MAX / dr_block_pixel_total); // dim3 fc4grid(dblock_count , rblock_count / fc4block.z); // fc_transform_n_calc_mse<<<fc4grid, fc4block>>> //*/ //__global__ void fc_transform_n_calc_mse(uint8_t* d_domains, // uint8_t* d_ranges, // double* d_contrast_scaling, // uint32_t* d_brightness_shift, // 
uint32_t* mse) //{ // __shared__ uint32_t mse_all[THREADBLOCK_MAX]; // // uint32_t drblock_thread_id = threadIdx.y * blockDim.x + threadIdx.x; // uint32_t drblock_pixel_total = blockDim.x * blockDim.y; // uint32_t dblock_id = blockIdx.x; // uint32_t dblock_count = gridDim.x; // uint32_t rblock_id = blockIdx.y * blockDim.z + threadIdx.z; // uint32_t rblock_count = gridDim.y * blockDim.z; // // uint32_t array_id = dblock_id * rblock_count + rblock_id; // // uint32_t smem_array_id = threadIdx.z * drblock_pixel_total + drblock_thread_id; // uint32_t smem_block_id = threadIdx.z * drblock_pixel_total; // uint32_t smem_thread_id = drblock_thread_id; // // //̃WɃhCɓKscaling, shift // uint32_t shift = d_brightness_shift[array_id]; // double scale = d_contrast_scaling[array_id]; // // double f_d_p = scale * (double)d_domains[dblock_id + drblock_thread_id] + (double)shift; // // if(f_d_p < 0){ // f_d_p = -f_d_p; // } // //uint8_tɃLXg_ōől255ɌŒ肳 // uint8_t fixed_dpixel = (uint8_t)f_d_p; // int32_t fixed_dpixel2 = (int32_t)fixed_dpixel * (int32_t)fixed_dpixel; // //IWiW // uint8_t rpixel = d_ranges[rblock_id + drblock_thread_id]; // // for(int32_t rotate = 0; rotate < 8; rotate++){ // int32_t fixed_dpixel2 = (int32_t)fixed_dpixel * (int32_t)fixed_dpixel; // int32_t rpixel2 = (int32_t)rpixel * (int32_t)rpixel; // int32_t diff = fixed_dpixel2 - rpixel2; // diff = diff < 0 ? 
-diff : diff; // uint32_t diff_abs = diff; // mse_all[smem_array_id] = diff_abs; // // for (int32_t j = drblock_pixel_total / 2; j > 0; j >>= 1) { // if(smem_thread_id < j){ // mse_all[smem_array_id] += mse_all[smem_array_id + j]; // } // __syncthreads(); // } // if(smem_thread_id == 0){ // mse[rotate * dblock_count * rblock_count + dblock_id * rblock_count + rblock_id] = mse_all[smem_array_id]; // } // if (rotate < 7) { // if (blockDim.x == 4) { // rpixel = d_ranges[rblock_id + dc_affine_transform_size4[rotate][drblock_thread_id]]; // } // else if (blockDim.x == 8) { // rpixel = d_ranges[rblock_id + dc_affine_transform_size8[rotate][drblock_thread_id]]; // } // else if (blockDim.x == 16) { // rpixel = d_ranges[rblock_id + dc_affine_transform_size16[rotate][drblock_thread_id]]; // } // } // __syncthreads(); // } //} // ///* // eW̓덷ŏlłhC(h܂)index_NVŋ߁CeWێ //*/ //__global__ void fc_save_min_mse(uint32_t* d_mse, // double* d_cotrast_scaling, // uint32_t* d_brightness_shift, // compress_data_part_rgb_gpu* d_compress_data_part_gpu) //{ // //} // ///* // // // 摜c1/2{ɏk // 4_̕ϒlZo邾̕ // J[lŃubNTCY킹 // ubNTCY // Jl[ // fc_resize2<<<(grid_x, grid_y), (block_x, block_y, n)>>>(i, o, size); //*/ //__global__ void fc_resize2(uint8_t* d_original_img, uint8_t* d_resize_img, uint32_t original_width) { // //blockDim.x, blockDim.y̓TCỸubNTCY // //block͕̐̂ϊOň // uint32_t blocks_num = gridDim.x;//original_width / blockDim.x; // // //resizeubN̑傫 // uint32_t resize_block_total = blockDim.x * blockDim.y; // //resizeubN̔z̐擪index // uint32_t resize_block_index = (blockIdx.y * blocks_num + blockIdx.x) * resize_block_total; // //resizeubNXbhindex // uint32_t resize_thread_index = threadIdx.y * blockDim.y + threadIdx.x; // // //ubN̑傫 // uint32_t orig_block_total = resize_block_total << 2; // //ubN̑傫̔z̐擪index // uint32_t orig_block_index = (blockIdx.y * blocks_num + blockIdx.x) * orig_block_total; // //resizeubNXbhindex1 // uint32_t orig_thread_index1 = (threadIdx.y << 1) * (blockDim.y << 1) + 
(threadIdx.x << 1); // //resizeubNXbhindex2 // uint32_t orig_thread_index2 = (threadIdx.y << 1) * (blockDim.y << 1) + (threadIdx.x << 1) + 1; // //resizeubNXbhindex3 // uint32_t orig_thread_index3 = ((threadIdx.y << 1) + 1) * (blockDim.y << 1) + (threadIdx.x << 1); // //resizeubNXbhindex4 // uint32_t orig_thread_index4 = ((threadIdx.y << 1) + 1) * (blockDim.y << 1) + (threadIdx.x << 1) + 1; // // //resizeubNXbh // uint32_t idx1 = d_original_img[orig_block_index + orig_thread_index1]; // uint32_t idx2 = d_original_img[orig_block_index + orig_thread_index2]; // uint32_t idx3 = d_original_img[orig_block_index + orig_thread_index3]; // uint32_t idx4 = d_original_img[orig_block_index + orig_thread_index4]; // // d_resize_img[resize_block_index + resize_thread_index] = (uint8_t)((idx1 + idx2 + idx3 + idx4) >> 2); //} // ///* // Ŝ4x4CubNTCY2x2̎ // { 0, 1, 2, 3, // 4, 5, 6, 6, // 8, 9,10,11, // 12,13,14,14} // ̂悤ȉfl̕т̃O[XP[̔z摜eXg\ // SfĂ̂őx͒xCeXgp //*/ //void show_img(uint8_t* img_array, uint32_t width, uint32_t height){ // cv::Mat mat(width, height, CV_8U); // for (uint32_t y = 0; y < height; y++) { // for (uint32_t x = 0; x < width; x++) { // mat.at<uint8_t>(y, x) = img_array[y*width + x]; // } // } // cv::namedWindow("show_img", cv::WINDOW_AUTOSIZE); // cv::imshow("show_img", mat); // cv::waitKey(0); // cv::destroyAllWindows(); //} // ///* // Ŝ4x4CubNTCY2x2̎ // { 0, 1, 4, 5, // 2, 3, 6, 7, // 8, 9,12,13, // 10,11,14,15} // ̂悤ȉfl̕т̃O[XP[̔z摜eXg\ // SfĂ̂őx͒xCeXgp // block_size == block_height == block_width //*/ //void show_img2(uint8_t* img_array, uint32_t width, uint32_t height, uint32_t block_size) { // cv::Mat mat(width, height, CV_8U); // // //̃W̐ // uint32_t range_x_n = width / block_size; // //c̃W̐ // uint32_t range_y_n = height / block_size; // // uint32_t index = 0; // for(uint32_t y = 0; y < range_y_n * block_size; y += block_size){ // for (uint32_t x = 0; x < range_x_n * block_size; x += block_size) { // //eubN // for (uint32_t block_rows = 0; block_rows < 
block_size; block_rows++) { // for (uint32_t block_cols = 0; block_cols < block_size; block_cols++) { // mat.at<uint8_t>(y + block_rows, x + block_cols) = img_array[index]; // index++; // } // } // } // } // // cv::namedWindow("show_img", cv::WINDOW_AUTOSIZE); // cv::imshow("show_img", mat); // cv::waitKey(0); // cv::destroyAllWindows(); //} // // ///* // CPU // Ŝ4x4CubNTCY2x2̎Cfl̕т1ȉ̔z̕тɂ // { 0, 1, 2, 3, // 4, 5, 6, 6, // 8, 9,10,11, // 12,13,14,14} -> {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} //*/ //void img2array(cv::Mat img, uint8_t* img_array) { // assert(img.isContinuous()); // img.convertTo(img, CV_8UC1); // // // for (uint32_t y = 0; y < img.rows; y++) { // for (uint32_t x = 0; x < img.cols; x++) { // img_array[y*img.rows + x] = img.at<uint8_t>(y, x); // } // } //} // ///* // CPU // Ŝ4x4CubNTCY2x2̎Cfl̕т1ȉ̔z̕тɂ // { 0, 1, 2, 3, // 4, 5, 6, 7, // 8, 9,10,11, // 12,13,14,15} -> {0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15} //*/ //void img2blockarray(cv::Mat img, uint8_t* img_array, uint32_t block_size){ // assert(img.isContinuous()); // assert((img.cols % block_size) == 0); // assert((img.rows % block_size) == 0); // img.convertTo(img, CV_8UC1); // // //̃W̐ // uint32_t range_x_n = img.cols / block_size; // //c̃W̐ // uint32_t range_y_n = img.rows / block_size; // std::cout << "tatal : "<<img.total() << std::endl; // uint32_t index = 0; // for (uint32_t y = 0; y < range_y_n * block_size; y += block_size) { // for (uint32_t x = 0; x < range_x_n * block_size; x += block_size) { // //eubN // // for (uint32_t block_rows = 0; block_rows < block_size; block_rows++) { // for (uint32_t block_cols = 0; block_cols < block_size; block_cols++) { // // //if(index % 1000 == 0)std::cout << index << std::endl; // img_array[index] = img.at<uint8_t>(y + block_rows, x + block_cols); // index++; // } // } // } // } // //} // //void launch_rgb_compress_kernel(cv::Mat img, uint32_t blocksize) //{ // /* // 0.O // */ // // //hCEWubN̂Pӂ̒ // uint32_t dr_blocksize = 
blocksize; // //ubN1‚܂މf // uint32_t dr_block_pixel_total = dr_blocksize * dr_blocksize; // //WubN̕ӓ̐ // uint32_t rblock_cols = img.cols / dr_blocksize; // uint32_t rblock_rows = img.rows / dr_blocksize; // uint32_t rblock_count = rblock_cols * rblock_rows; // //hCubN̕ӕӂ̐ // uint32_t dblock_cols = rblock_cols >> 1; // uint32_t dblock_rows = rblock_rows >> 1; // uint32_t dblock_count = dblock_cols * dblock_rows; // // assert(blocksize == 4 || blocksize == 8 || blocksize == 16); // assert((img.cols % blocksize) == 0); // assert((img.rows % blocksize) == 0); // assert(dblock_count % (THREADBLOCK_MAX / blocksize) == 0); // assert(rblock_count % (THREADBLOCK_MAX / blocksize) == 0); // assert(((rblock_count * dblock_count) % THREADBLOCK_MAX) == 0); // assert(rblock_count < (65535 * THREADBLOCK_MAX)); // // std::cout << "domain block count : " << dblock_count << std::endl; // std::cout << "range block count : " << rblock_count << std::endl; // // /* // 1.ubNϊEkϊ // */ // //ubNϊEkϊׂ̈̋NXbhݒ // dim3 fc1block(dr_blocksize, dr_blocksize); // dim3 fc1grid(rblock_cols, rblock_rows); // std::cout << "reduction grid :" << " x = " << fc1grid.x << " y = " << fc1grid.y << std::endl; // uint32_t orig_arraysize = img.total() * img.channels(); // uint8_t* h_orig_img = new uint8_t[orig_arraysize]; // uint8_t* d_orig_img; // uint8_t* d_ranges; // uint8_t* d_domains; // // CHECK(cudaMalloc((void**)&d_orig_img, sizeof(uint8_t) * orig_arraysize)); // CHECK(cudaMalloc((void**)&d_ranges, sizeof(uint8_t) * orig_arraysize)); // CHECK(cudaMalloc((void**)&d_domains, sizeof(uint8_t) * orig_arraysize >> 2)); // // img2array(img, h_orig_img); // // CHECK(cudaMemcpy(d_orig_img, h_orig_img, sizeof(uint8_t) * orig_arraysize, cudaMemcpyHostToDevice)); // fc_make_domains_n_ranges<<<fc1grid, fc1block>>>(d_orig_img, d_ranges, d_domains); // CHECK(cudaDeviceSynchronize()); // //uint8_t* h_ranges = new uint8_t[orig_arraysize]; // //uint8_t* h_domains = new uint8_t[orig_arraysize >> 2]; // 
//CHECK(cudaMemcpy(h_ranges, d_ranges, sizeof(uint8_t) * orig_arraysize, cudaMemcpyDeviceToHost)); // //CHECK(cudaMemcpy(h_domains, d_domains, sizeof(uint8_t) * orig_arraysize >> 2, cudaMemcpyDeviceToHost)); // //show_img2(h_ranges, img.cols, img.rows, dr_blocksize); // //show_img2(h_domains, img.cols >> 1, img.rows >> 1, dr_blocksize); // // /* // 2.hCEW̑aEŏlőlvZ // */ // //P‚̃XbhubNŕ̃hC // dim3 fc2dblock(dr_blocksize, dr_blocksize, THREADBLOCK_MAX / dr_block_pixel_total); // dim3 fc2dgrid(dblock_count / fc2dblock.z); // // uint32_t* d_dblock_sum; // uint32_t* d_dblock_min; // uint32_t* d_dblock_max; // CHECK(cudaMalloc((void**)&d_dblock_sum, sizeof(uint32_t) * dblock_count)); // CHECK(cudaMalloc((void**)&d_dblock_min, sizeof(uint32_t) * dblock_count)); // CHECK(cudaMalloc((void**)&d_dblock_max, sizeof(uint32_t) * dblock_count)); // fc_domain_summimmax<<<fc2dgrid, fc2dblock>>>(d_domains, dblock_count, d_dblock_sum, d_dblock_min, d_dblock_max); // // //P‚̃XbhubNŕ̃W // dim3 fc2rblock(dr_blocksize, dr_blocksize, THREADBLOCK_MAX / dr_block_pixel_total); // dim3 fc2rgrid(rblock_count / fc2rblock.z); // uint32_t* d_rblock_sum; // uint32_t* d_rblock_min; // uint32_t* d_rblock_max; // CHECK(cudaMalloc((void**)&d_rblock_sum, sizeof(uint32_t) * rblock_count)); // CHECK(cudaMalloc((void**)&d_rblock_min, sizeof(uint32_t) * rblock_count)); // CHECK(cudaMalloc((void**)&d_rblock_max, sizeof(uint32_t) * rblock_count)); // fc_range_summimmax<<<fc2rgrid, fc2rblock>>>(d_ranges, rblock_count, d_rblock_sum, d_rblock_min, d_rblock_max); // // CHECK(cudaDeviceSynchronize()); // // /* // 3.RgXgXP[OEPxVtgvZ // */ // // dim3 fc3block(THREADBLOCK_MAX); // dim3 fc3grid(dblock_count, rblock_count / THREADBLOCK_MAX); // // double* d_contrast_scaling; // uint32_t* d_brightness_shift; // // CHECK(cudaMalloc((void**)&d_contrast_scaling, sizeof(double) * dblock_count * rblock_count)); // CHECK(cudaMalloc((void**)&d_brightness_shift, sizeof(uint32_t) * dblock_count * rblock_count)); // 
//CHECK(cudaMalloc((void**)&d_adjust_domains_for_ranges, sizeof(uint32_t) * dblock_count * rblock_count * dr_block_pixel_total)); // // fc_calc_scale_n_shift<<<fc3grid, fc3block>>>(d_dblock_sum, // d_dblock_min, // d_dblock_max, // d_rblock_sum, // d_rblock_min, // d_rblock_max, // dr_block_pixel_total, // d_contrast_scaling, // d_brightness_shift); // // CHECK(cudaDeviceSynchronize()); // // /* // 4.RgXgXP[OEPxVtgKpE]EϊEvZEMSEvZi_NVj // */ // // // // dim3 fc4block(dr_blocksize, dr_blocksize, THREADBLOCK_MAX / dr_block_pixel_total); // dim3 fc4grid(dblock_count , rblock_count / fc4block.z); // //eW̊ehC̊e]ϊMSEۑĂ // uint32_t* d_mse; // CHECK(cudaMalloc((void**)&d_mse, sizeof(uint32_t) * rblock_count * dblock_count * 8)); // // fc_transform_n_calc_mse<<<fc4grid, fc4block>>>(d_domains, // d_ranges, // d_contrast_scaling, // d_brightness_shift, // d_mse); // CHECK(cudaDeviceSynchronize()); // //std::cout << sizeof(uint32_t) * rblock_count * dblock_count * 8 << "byte" << std::endl; // // //uint32_t* h_mse = new uint32_t[rblock_count * dblock_count * 8]; // //CHECK(cudaMemcpy(h_mse, d_mse, sizeof(uint32_t) * rblock_count * dblock_count * 8, cudaMemcpyDeviceToHost)); // // //std::cout << "nukiuti : " << h_mse[231] << std::endl; // // //delete[] h_mse; // // /* // 5.eWŏMSEEindexvZ(_NV) // */ // //ubNTCYɉRX^g // init_affine_transformer(dr_blocksize); // dim3 fc5block(THREADBLOCK_MAX); // dim3 fc5grid(dblock_count * 8, rblock_count/ THREADBLOCK_MAX); // // //W̐kf[^ۑ // compress_data_part_rgb_gpu* h_compress_data = new compress_data_part_rgb_gpu[rblock_count]; // compress_data_part_rgb_gpu* d_compress_data; // CHECK(cudaMalloc((void**)&d_compress_data, sizeof(compress_data_part_rgb_gpu) * rblock_count)); // // fc_save_min_mse<<<fc5grid, fc5block>>>(d_mse, d_contrast_scaling, d_brightness_shift, d_compress_data); // // CHECK(cudaMemcpy(h_compress_data, d_compress_data, sizeof(compress_data_part_rgb_gpu) * rblock_count, cudaMemcpyDeviceToHost)); // // //delete[] h_ranges; // 
//delete[] h_domains; // // /* // L.㏈ // */ // // delete[] h_orig_img; // delete[] h_compress_data; // // CHECK(cudaFree(d_orig_img)); // CHECK(cudaFree(d_ranges)); // CHECK(cudaFree(d_domains)); // // CHECK(cudaFree(d_rblock_sum)); // CHECK(cudaFree(d_rblock_min)); // CHECK(cudaFree(d_rblock_max)); // // CHECK(cudaFree(d_dblock_sum)); // CHECK(cudaFree(d_dblock_min)); // CHECK(cudaFree(d_dblock_max)); // // CHECK(cudaFree(d_contrast_scaling)); // CHECK(cudaFree(d_brightness_shift)); // // CHECK(cudaFree(d_mse)); // // CHECK(cudaDeviceSynchronize()); //}
11,758
#define LIMIT -999

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>

void runTest(int argc, char **argv);

/* Host-side maximum of three ints (used by the traceback). */
int maximum(int a, int b, int c)
{
    int k;
    if (a <= b) k = b;
    else k = a;
    if (k <= c) return (c);
    else return (k);
}

/* Device-side maximum of three ints (used by the diagonal kernels). */
__device__ int maximum_dev(int a, int b, int c)
{
    int k;
    if (a <= b) k = b;
    else k = a;
    if (k <= c) return (c);
    else return (k);
}

/* BLOSUM62 substitution-score matrix (24 x 24). */
int blosum62[24][24] = {
    {4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
    {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
    {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
    {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
    {0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
    {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
    {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
    {0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
    {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
    {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
    {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
    {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
    {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
    {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
    {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
    {1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
    {0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
    {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
    {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
    {0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
    {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
    {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
    {0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
    {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}};

/* Wall-clock time in seconds. */
double gettime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return t.tv_sec + t.tv_usec * 1e-6;
}

int main(int argc, char **argv)
{
    runTest(argc, argv);
    return EXIT_SUCCESS;
}

void usage(int argc, char **argv)
{
    fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty>\n", argv[0]);
    fprintf(stderr, "\t<dimension>      - x and y dimensions\n");
    fprintf(stderr, "\t<penalty>        - penalty(positive integer)\n");
    exit(1);
}

typedef int thread_idx_t;

/* reference[i][j] = blosum62[input_itemsets[i][0]][input_itemsets[0][j]].
 * 2D launch; one thread per (i, j) cell, bounds-guarded. */
__global__ void kernel_init_reference(int *reference, int *blosum62,
                                      int *input_itemsets, int num_rows, int num_cols)
{
    const thread_idx_t thread_idx_i = threadIdx.y + (thread_idx_t)blockIdx.y * blockDim.y;
    const thread_idx_t thread_idx_j = threadIdx.x + (thread_idx_t)blockIdx.x * blockDim.x;

    if (thread_idx_i < num_rows && thread_idx_j < num_cols)
        reference[thread_idx_i * num_cols + thread_idx_j] =
            blosum62[input_itemsets[thread_idx_i * num_cols] * 24 +
                     input_itemsets[thread_idx_j]];
}

/* Fill the first row and first column of the DP matrix with gap penalties. */
__global__ void kernel_init_input_itemsets(int *input_itemsets, int num_cols, int penalty)
{
    const thread_idx_t thread_idx = threadIdx.x + (thread_idx_t)blockIdx.x * blockDim.x;

    if (thread_idx < num_cols)
    {
        input_itemsets[thread_idx * num_cols] = -thread_idx * penalty;
        input_itemsets[thread_idx] = -thread_idx * penalty;
    }
}

/* Process anti-diagonal i in the top-left triangle of the DP matrix.
 * One thread per cell on the diagonal; neighbors are staged in shared memory. */
__global__ void kernel_top_left_processing(int *input_itemsets, int *reference,
                                           int num_cols, int i, int penalty)
{
    __shared__ int s_input_itemsets_nw[1024];
    __shared__ int s_input_itemsets_w[1025];

    const thread_idx_t thread_idx = threadIdx.x + (thread_idx_t)blockIdx.x * blockDim.x;
    const thread_idx_t tid = threadIdx.x;
    const int index = (thread_idx + 1) * num_cols + (i + 1 - thread_idx);

    /* FIX: guard the staging loads too — threads past the diagonal (thread_idx > i)
     * computed an `index` outside the matrix and read global memory out of bounds.
     * The guarded values are only ever consumed by threads with thread_idx <= i
     * (a consumer at tid reads s_..._w[tid], written by tid-1, whose thread_idx is
     * thread_idx-1 <= i), so results are unchanged. */
    if (thread_idx <= i) {
        s_input_itemsets_nw[tid] = input_itemsets[index - 1 - num_cols] + reference[index];
        s_input_itemsets_w[tid + 1] = input_itemsets[index - 1];
        if (tid == 0)
            s_input_itemsets_w[0] = input_itemsets[index - num_cols];
    }

    __syncthreads();   /* reached by all threads — no divergent barrier */

    if (thread_idx <= i) {
        input_itemsets[index] = maximum_dev(s_input_itemsets_nw[tid],
                                            s_input_itemsets_w[tid + 1] - penalty,
                                            s_input_itemsets_w[tid] - penalty);
    }
}

/* Process anti-diagonal i in the bottom-right triangle of the DP matrix. */
__global__ void kernel_bottom_right_processing(int *input_itemsets, int *reference,
                                               int num_cols, int i, int penalty)
{
    __shared__ int s_input_itemsets_nw[1024];
    __shared__ int s_input_itemsets_n[1025];

    const thread_idx_t thread_idx = threadIdx.x + (thread_idx_t)blockIdx.x * blockDim.x;
    const thread_idx_t tid = threadIdx.x;
    const int index = (num_cols - thread_idx - 2) * num_cols + thread_idx + num_cols - i - 2;

    /* FIX: same out-of-bounds staging-load guard as in the top-left kernel. */
    if (thread_idx <= i) {
        s_input_itemsets_nw[tid] = input_itemsets[index - 1 - num_cols] + reference[index];
        s_input_itemsets_n[tid + 1] = input_itemsets[index - num_cols];
        if (tid == 0)
            s_input_itemsets_n[0] = input_itemsets[index - 1];
    }

    __syncthreads();

    if (thread_idx <= i) {
        input_itemsets[index] = maximum_dev(s_input_itemsets_nw[tid],
                                            s_input_itemsets_n[tid + 1] - penalty,
                                            s_input_itemsets_n[tid] - penalty);
    }
}

/* Run Needleman-Wunsch: init on GPU, sweep both triangles diagonal by
 * diagonal, then trace back on the host and write result.txt. */
void runTest(int argc, char **argv)
{
    int max_rows, max_cols, penalty;
    int *input_itemsets, *reference;

    if (argc == 3)
    {
        max_cols = max_rows = atoi(argv[1]);
        penalty = atoi(argv[2]);
    }
    else
    {
        usage(argc, argv);
    }

    max_rows = max_rows + 1;
    max_cols = max_cols + 1;

    /* Pinned host buffer so the async device-to-host copy below is legal. */
    cudaMallocHost(&reference, max_rows * max_cols * sizeof(int));
    input_itemsets = (int *)calloc(max_rows * max_cols, sizeof(int));
    if (!input_itemsets)
    {
        /* FIX: the original only printed and then dereferenced the NULL pointer. */
        fprintf(stderr, "error: can not allocate memory");
        exit(EXIT_FAILURE);
    }

    // srand(time(NULL));
    srand(0);

    printf("Start Needleman-Wunsch\n");

    for (int i = 1; i < max_rows; i++)
        input_itemsets[i * max_cols] = rand() % 10 + 1;
    for (int j = 1; j < max_cols; j++)
        input_itemsets[j] = rand() % 10 + 1;

    int *reference_dev;
    int *input_itemsets_dev;
    int *blosum62_dev;

    cudaStream_t reference_copy_stream;
    cudaStreamCreateWithFlags(&reference_copy_stream, cudaStreamNonBlocking);

    const size_t size = max_rows * max_cols * sizeof(int);
    cudaMalloc(&reference_dev, size);
    cudaMalloc(&input_itemsets_dev, size);
    cudaMalloc(&blosum62_dev, sizeof(blosum62));

    cudaMemcpy(input_itemsets_dev, input_itemsets, size, cudaMemcpyHostToDevice);
    cudaMemcpy(blosum62_dev, blosum62, sizeof(blosum62), cudaMemcpyHostToDevice);

    const dim3 block_size(32, 32, 1);
    const size_t grid_cols = (max_cols + block_size.x - 1) / block_size.x;
    const size_t grid_rows = (max_rows + block_size.y - 1) / block_size.y;
    const dim3 grid_size(grid_cols, grid_rows, 1);

    kernel_init_reference<<< grid_size, block_size >>>(reference_dev, blosum62_dev,
                                                       input_itemsets_dev, max_rows, max_cols);

    /* FIX: reference_copy_stream was created with cudaStreamNonBlocking, so it
     * does NOT implicitly wait for the default-stream kernel above — the copy
     * could race with kernel_init_reference. Order it explicitly via an event. */
    cudaEvent_t reference_ready;
    cudaEventCreateWithFlags(&reference_ready, cudaEventDisableTiming);
    cudaEventRecord(reference_ready, 0);
    cudaStreamWaitEvent(reference_copy_stream, reference_ready, 0);
    cudaMemcpyAsync(reference, reference_dev, size, cudaMemcpyDeviceToHost,
                    reference_copy_stream);

    kernel_init_input_itemsets<<< ceil(max_cols / 1024.0), 1024 >>>(input_itemsets_dev,
                                                                    max_cols, penalty);

    printf("Processing top-left matrix\n");
    for (int i = 0; i < max_cols - 2; i++)
    {
        kernel_top_left_processing<<<ceil((i + 1) / 1024.0), 1024>>>(input_itemsets_dev,
                                                                     reference_dev,
                                                                     max_cols, i, penalty);
    }

    printf("Processing bottom-right matrix\n");
    for (int i = max_cols - 4; i >= 0; i--)
    {
        kernel_bottom_right_processing<<<ceil((i + 1) / 1024.0), 1024>>>(input_itemsets_dev,
                                                                         reference_dev,
                                                                         max_cols, i, penalty);
    }

    cudaMemcpy(input_itemsets, input_itemsets_dev, size, cudaMemcpyDeviceToHost);

    /* FIX: the host traceback below reads `reference`, but destroying a stream
     * does not wait for its pending work — synchronize the copy first. */
    cudaStreamSynchronize(reference_copy_stream);
    cudaStreamDestroy(reference_copy_stream);
    cudaEventDestroy(reference_ready);

    cudaFree(reference_dev);
    cudaFree(input_itemsets_dev);
    cudaFree(blosum62_dev);

#define TRACEBACK
#ifdef TRACEBACK
    FILE *fpo = fopen("result.txt", "w");
    fprintf(fpo, "print traceback value:\n");

    /* FIX: the loop condition used the comma operator (`i >= 0, j >= 0`),
     * which silently discards the first test; both bounds must hold. */
    for (int i = max_rows - 2, j = max_rows - 2; i >= 0 && j >= 0;)
    {
        int nw, n, w, traceback;
        if (i == max_rows - 2 && j == max_rows - 2)
            fprintf(fpo, "%d ", input_itemsets[i * max_cols + j]);
        if (i == 0 && j == 0)
            break;
        if (i > 0 && j > 0)
        {
            nw = input_itemsets[(i - 1) * max_cols + j - 1];
            w = input_itemsets[i * max_cols + j - 1];
            n = input_itemsets[(i - 1) * max_cols + j];
        }
        else if (i == 0)
        {
            nw = n = LIMIT;
            w = input_itemsets[i * max_cols + j - 1];
        }
        else if (j == 0)
        {
            nw = w = LIMIT;
            n = input_itemsets[(i - 1) * max_cols + j];
        }

        //traceback = maximum(nw, w, n);
        int new_nw, new_w, new_n;
        new_nw = nw + reference[i * max_cols + j];
        new_w = w - penalty;
        new_n = n - penalty;

        traceback = maximum(new_nw, new_w, new_n);
        if (traceback == new_nw)
            traceback = nw;
        if (traceback == new_w)
            traceback = w;
        if (traceback == new_n)
            traceback = n;

        fprintf(fpo, "%d ", traceback);

        if (traceback == nw) { i--; j--; continue; }
        else if (traceback == w) { j--; continue; }
        else if (traceback == n) { i--; continue; }
    }
    fclose(fpo);
#endif

    cudaFreeHost(reference);
    free(input_itemsets);
}
11,759
# include <stdio.h>
# include <stdlib.h>

#define M 8
#define N 9
#define THREAD_X 2
#define THREAD_Y 3

/* Row-major flat indexing into the M x N matrices. */
#define A(i,j) A[i*N+j]
#define B(i,j) B[i*N+j]
#define C(i,j) C[i*N+j]

/* A = B + C elementwise. One thread per element: the x dimension indexes
 * rows (0..M-1) and the y dimension indexes columns (0..N-1). */
__global__ void matAdd(int *A, int *B, int *C){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    /* FIX: bounds guard — the kernel previously relied on M and N dividing
     * the block dimensions exactly; any other sizes wrote out of bounds. */
    if (i < M && j < N)
        A(i,j) = B(i,j) + C(i,j);
}

int main(){
    int A[M][N], B[M][N], C[M][N];
    int *A_d, *B_d, *C_d;
    int i, j;

    dim3 dimBlock(THREAD_X, THREAD_Y);
    /* FIX: ceil-division so the grid covers every element even when M or N
     * is not a multiple of the block dimension (truncating division would
     * silently drop the remainder rows/columns). */
    dim3 dimGrid((M + THREAD_X - 1) / THREAD_X, (N + THREAD_Y - 1) / THREAD_Y);

    cudaMalloc((void**)&A_d, M*N*sizeof(int));
    cudaMalloc((void**)&B_d, M*N*sizeof(int));
    cudaMalloc((void**)&C_d, M*N*sizeof(int));

    for(i = 0; i < M; i++){
        for(j = 0; j < N; j++){
            B[i][j] = i*j;
            C[i][j] = i+j;
            //printf("%d ",B[i][j]);
        }
        //printf("\n");
    }

    /* FIX: the original also copied the uninitialized host array A to the
     * device; A_d is write-only in the kernel, so only B and C are uploaded. */
    cudaMemcpy(B_d, B, sizeof(int)*M*N, cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C, sizeof(int)*M*N, cudaMemcpyHostToDevice);

    matAdd<<<dimGrid, dimBlock>>>(A_d, B_d, C_d);

    cudaMemcpy(A, A_d, sizeof(int)*M*N, cudaMemcpyDeviceToHost);

    for(i = 0; i < M; i++){
        for(j = 0; j < N; j++){
            printf("%d ", A[i][j]);
        }
        printf("\n");
    }

    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    return 0;
}
11,760
#include <iostream>
#include <string.h>
#include <fstream>
#include <vector>
#include <sstream>
#include <unordered_map>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <algorithm>
#include <math.h>

using namespace std;

// Compile with:
//   nvcc -o example example.cu

#define ERROR_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Divide each embedding row i (dim floats) by its precomputed L2 norm
// normSum_d[i], writing the result to matrixNorm_d. One thread per row.
__global__ void normalize(float* mat, float* normSum_d, float* matrixNorm_d, int dim, int max){
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if(i < max){
        for(int j = 0; j < dim; j++){
            matrixNorm_d[i*dim+j] = mat[i*dim+j] / normSum_d[i];
        }
    }
}

// Analogy vector: D = A + C - B elementwise over dim entries.
__global__ void vectorManipulation(float* A, float* B, float* C, float* D, int dim){
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < dim)
        D[i] = A[i] + C[i] - B[i];
}

// Unfinished stub kept for interface compatibility.
__global__ void matrixManipulation(float* matrix, float* vector, int dim){
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // if (i < len)
}

// res[i] += dot(mat row i, vec). `max` bounds the row index, and res must be
// zero-initialized before launch because the kernel accumulates with +=.
__global__ void vecMatMultiplication(float* mat, float* vec, float* res, int len, int max){
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if(i < max){
        for(int j = 0; j < len; j ++){
            res[i] += mat[i*len+j] * vec[j];
        }
    }
}

int main(int argc, char* argv[])
{
    if(argc == 2){
        if(strcmp(argv[1], "analogy") == 0){
            cout << "usage: ./a.out analogy path/to/your/model path/to/your/testfile" << endl;
            cout << "   or: ./a.out analogy path/to/your/model word1 word2 word3" << endl;
        }
        else
            cout << "function not supported" << endl;
    }
    else if (argc > 2){
        unordered_map<string, int> word2vec_map;
        string str;
        ifstream infile;
        infile.open(argv[2]);
        // FIX: fail fast on a missing model file instead of silently
        // reading garbage counts from a dead stream.
        if(!infile.is_open()){
            cout << "cannot open model file: " << argv[2] << endl;
            return -1;
        }

        // First pass: count the lines and derive the embedding dimension
        // from the first line (tokens minus the leading word).
        int word_count = 0;
        getline(infile,str);
        word_count++;
        stringstream sss(str);
        int dim = -1;
        string buff;
        while(sss >> buff) dim ++;
        while(getline(infile,str)){
            word_count++;
        }
        infile.close();

        // FIX: the original used non-standard, *uninitialized* VLAs and then
        // accumulated into normSum_h with += (undefined behavior). Use
        // zero-initialized containers instead.
        vector<string> dictionary(word_count);
        vector<float> normSum_h(word_count, 0.0f);

        infile.open(argv[2]);
        int matrix_size = word_count*dim;
        float *matrix_h = new float[matrix_size];
        float *resVec_h = new float[word_count];

        // Second pass: parse every line into the embedding matrix and
        // compute each row's L2 norm.
        int i = 0;
        while(getline(infile,str)){
            string buf;
            stringstream ss(str);
            ss >> buf;
            word2vec_map[buf] = i;
            dictionary[i] = buf;
            int j = 0;
            while (ss >> buf){
                matrix_h[i*dim+j] = stof(buf);
                normSum_h[i] += pow(stof(buf),2);
                j++;
            }
            normSum_h[i] = sqrt(normSum_h[i]);
            i++;
        }
        infile.close();

        float* matrix_d;
        float* matrixNorm_d;
        float* D;
        float* resVec_d;
        float* normSum_d;
        ERROR_CHECK(cudaMalloc((void **)&matrix_d, matrix_size*sizeof(float)));
        ERROR_CHECK(cudaMalloc((void **)&matrixNorm_d, matrix_size*sizeof(float)));
        ERROR_CHECK(cudaMalloc((void **)&D, dim*sizeof(float)));
        ERROR_CHECK(cudaMalloc((void **)&resVec_d, word_count*sizeof(float)));
        ERROR_CHECK(cudaMalloc((void **)&normSum_d, word_count*sizeof(float)));
        ERROR_CHECK(cudaMemcpy(matrix_d, matrix_h, matrix_size*sizeof(float), cudaMemcpyHostToDevice));
        ERROR_CHECK(cudaMemcpy(normSum_d, normSum_h.data(), word_count*sizeof(float), cudaMemcpyHostToDevice));

        dim3 dimGrid(ceil(word_count/1024.0), 1, 1);
        dim3 dimBlock(1024, 1, 1);
        normalize<<<dimGrid, dimBlock>>>(matrix_d, normSum_d, matrixNorm_d, dim, word_count);

        float *matrixNorm_h = new float[matrix_size];
        ERROR_CHECK(cudaMemcpy(matrixNorm_h, matrixNorm_d, matrix_size*sizeof(float), cudaMemcpyDeviceToHost));

        if(strcmp(argv[1],"analogy") == 0){
            if(argc == 6){
                int count[3];
                for(int i = 0; i < 3; i++){
                    count[i] = word2vec_map.count(argv[3+i]);
                    if(count[i] != 1){
                        cout << "model does not contain the word: " << argv[3+i] << endl;
                        return -1;
                    }
                }
                int idx_1 = word2vec_map[argv[3]];
                int idx_2 = word2vec_map[argv[4]];
                int idx_3 = word2vec_map[argv[5]];

                dim3 dimGrid1(1, 1, 1);
                dim3 dimBlock1(dim, 1, 1);
                vectorManipulation<<<dimGrid1, dimBlock1>>>(&matrixNorm_d[idx_1*dim], &matrixNorm_d[idx_2*dim], &matrixNorm_d[idx_3*dim], D, dim);
                ERROR_CHECK(cudaDeviceSynchronize());

                // FIX: resVec_d is accumulated with += in the kernel, but
                // cudaMalloc does not zero memory — clear it first.
                ERROR_CHECK(cudaMemset(resVec_d, 0, word_count*sizeof(float)));

                dim3 dimGrid2(ceil(word_count/1024.0), 1, 1);
                dim3 dimBlock2(1024, 1, 1);
                // FIX: the bounds argument must be the row count (word_count);
                // the original passed matrix_size, letting threads past
                // word_count write out of bounds of resVec_d.
                vecMatMultiplication<<<dimGrid2, dimBlock2>>>(matrixNorm_d, D, resVec_d, dim, word_count);
                ERROR_CHECK(cudaMemcpy(resVec_h, resVec_d, word_count*sizeof(float), cudaMemcpyDeviceToHost));

                // Exclude the three query words from the ranking.
                resVec_h[idx_1] = 0;
                resVec_h[idx_2] = 0;
                resVec_h[idx_3] = 0;
                int max = std::max_element(resVec_h, resVec_h + word_count) - resVec_h;
                cout << dictionary[max] << endl;
            }
            else if(argc == 4) {
                ifstream infile;
                infile.open(argv[3]);
                int predict_line_count = 0;
                while(getline(infile,str)){
                    predict_line_count++;
                }
                infile.close();

                int predict_word_count = predict_line_count * 3 * dim;
                float *predict_matrix = new float[predict_word_count];
                // cout << predict_line_count << endl;

                infile.open(argv[3]);
                int i = 0;
                while(getline(infile, str)){
                    string buf;
                    stringstream ss(str);
                    string line[4];
                    int j = 0;
                    while (ss >> buf){
                        line[j] = buf;
                        j++;
                    }
                    // FIX: count started uninitialized before the += loop.
                    int count = 0;
                    for(int j = 0; j < 4; j++)
                        count += word2vec_map.count(line[j]);
                    if(count != 4) {
                        cout << "model does not contain one of the word" << endl;
                        break;
                    }
                    else {
                        for(int j = 0; j < 3; j++){
                            int idx = word2vec_map[line[j]];
                            // FIX: copy from the HOST copy of the normalized
                            // matrix — the original passed the device pointer
                            // matrixNorm_d to std::copy, dereferencing device
                            // memory on the host.
                            std::copy(matrixNorm_h + idx * dim,
                                      matrixNorm_h + idx * dim + dim,
                                      predict_matrix + (i * 3 + j) * dim);
                        }
                    }
                    i++;
                }
                infile.close();
                // for(int i = 0; i < 150; i ++){
                //     cout << predict_matrix[i] << endl;
                // }
                delete[] predict_matrix;
            }
        }

        ERROR_CHECK(cudaFree(matrix_d));
        ERROR_CHECK(cudaFree(matrixNorm_d));
        ERROR_CHECK(cudaFree(D));
        ERROR_CHECK(cudaFree(resVec_d));
        ERROR_CHECK(cudaFree(normSum_d));   // FIX: was leaked
        delete[] matrix_h;
        delete[] matrixNorm_h;
        delete[] resVec_h;
    }
    return 0;
}
11,761
#include "includes.h"

// Builds a binary mask: mask[i] = 1 when the non-smoothed sample deviates
// from the (1/nhalf-scaled) smoothed sample by more than a fixed threshold.
__global__ void test(float* nonSmoothed, float* smoothed, int* mask, int nhalf)
{
    int i = threadIdx.x + blockDim.x*blockIdx.x;
    if (i < nhalf)
    {
        // FIX: `diff` was declared int, so the fabs() result was truncated
        // toward zero — every difference below 1.0 compared as 0 and the
        // 0.23 threshold never took effect. This is the likely cause of the
        // original "different behaviour when compared to CPU version" note.
        float diff = fabsf(nonSmoothed[i] - smoothed[i] / nhalf);
        mask[i] = (diff > 0.23f) ? 1 : 0;   // threshold choice still TODO per original note
    }
}
11,762
/*
 * Tiled Matrix Multiplication
 * (MP2, Fall 2014, GPU Programming/Auburn University)
 *
 * Compile with -DTILE_WIDTH=16 (for example) to change the tile size.
 * Compile with -DSEED=12 (for example) to seed the random number generator.
 */

#include <assert.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <fstream>

/* Usage message displayed when invalid command line arguments are supplied */
#define USAGE \
    "MP2 generates a random (m x k) matrix M and (k x n) matrix N\n" \
    "and multiplies M by N using tiled matrix multiplication.\n" \
    "The values of m, k, and n must be >= 1.\n" \
    "\n" \
    "Usage: mp2 m k n\n"

/* Tile size -- define here if not defined using the -D compiler flag */
#ifndef TILE_WIDTH
# define TILE_WIDTH 16
#endif

/* Seed for the random number generator -- define here if not using -D */
#ifndef SEED
# define SEED 1
#endif

/* Maximum difference allowed between the GPU and CPU result matrices */
#define EPSILON 1e-2

/* If a CUDA call fails, display an error message and exit */
#define CUDA_CHECK(e) { \
    cudaError_t err = (e); \
    if (err != cudaSuccess) \
    { \
        fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", \
            __FILE__, __LINE__, #e, cudaGetErrorString(err)); \
        exit(EXIT_FAILURE); \
    } \
}

/* assert() is only supported on devices of compute capability >= 2.0 */
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
# undef assert
# define assert(arg)
#endif

/* CPU reference: Euclidean distance between descriptor rows `row` and `col`
 * of h_M (m rows of k floats); result stored at h_P[row*m+col]. */
void getDistance(float *h_M, float *h_P, int m, int k, int col, int row){
    float expected = 0.0;
    for (int i = 0; i < k; i++) {
        expected += pow(h_M[row*k+i] - h_M[col*k+i], 2);
    }
    expected = sqrt(expected);
    h_P[row*m+col] = expected;
}

/* CPU reference: index of the smallest entry in row `row` of the (m x m)
 * distance matrix h_M, written to result[row]. */
void argMin(float* h_M, int* result, int m, int k, int row){
    float minimum = 1e5;
    int pos = -1;
    for(int col = 0; col < m; col++){
        if (h_M[row*m+col] < minimum){
            pos = col;
            minimum = h_M[row*m+col];
        }
    }
    result[row] = pos;
}

/* GPU: one thread per row scans that row of the (m x m) distance matrix and
 * records the index of its minimum in result[row]. Only the y dimension of
 * the launch selects rows; x is unused. */
__global__ static void argMin_gpu(float* h_M, int* result, int m, int k){
    assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH);
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    if(row >= m) return;
    float minimum = 1e5;
    int pos = -1;
    for(int col = 0; col < m; col++){
        if (h_M[row*m+col] < minimum){
            pos = col;
            minimum = h_M[row*m+col];
        }
    }
    result[row] = pos;
}

/* GPU: pairwise Euclidean distance between descriptor rows of d_M (m rows,
 * k floats each); d_P[row*m+col] gets the distance. The diagonal is set to a
 * large sentinel (100) so self-distance never wins the argmin. */
__global__ static void getDistance_gpu(float *d_M, float *d_P, int m, int k)
{
    assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH);

    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    if(row >= m || col >= m) return;
    if(row == col){
        d_P[row*m+col] = 100;
        return;
    }
    float expected = 0.0f;
    for (int i = 0; i < k; i++) {
        /* FIX: keep the arithmetic in single precision — the original used
         * pow(), promoting every element to double inside a float kernel. */
        float d = d_M[row*k+i] - d_M[col*k+i];
        expected += d * d;
    }
    d_P[row*m+col] = sqrtf(expected);
}

/* Displays one row of the given matrix */
static void printRow(int row, float *matrix, int cols)
{
    printf("[");
    if (cols >= 1) printf(" %3.3f", matrix[row*cols+0]);
    if (cols >= 2) printf(" %3.3f", matrix[row*cols+1]);
    if (cols >= 3) printf(" %3.3f", matrix[row*cols+2]);
    if (cols >= 6) printf(" ...");
    if (cols >= 5) printf(" %3.3f", matrix[row*cols+(cols-2)]);
    if (cols >= 4) printf(" %3.3f", matrix[row*cols+(cols-1)]);
    printf(" ]\n");
}

/* Displays the given matrix */
static void printMatrix(float *matrix, int rows, int cols)
{
    if (rows >= 1) printRow(0, matrix, cols);
    if (rows >= 2) printRow(1, matrix, cols);
    if (rows >= 3) printRow(2, matrix, cols);
    if (rows >= 6) printf(" ...\n");
    if (rows >= 5) printRow(rows-2, matrix, cols);
    if (rows >= 4) printRow(rows-1, matrix, cols);
}

/* Program entrypoint: load m descriptors of k floats from descriptor.txt,
 * compute the pairwise distance matrix and per-row argmin on the GPU, and
 * write close-pair indices to matrix.txt. */
int main()
{
    /* FIX: sizeof yields size_t; printing it with %d is undefined behavior. */
    printf("%zu, %zu, %zu, %zu\n", sizeof(long), sizeof(long long), sizeof(bool), sizeof(char));

    int m = 100;
    int k = 128;

    if (m < 1 || k < 1)
    {
        fprintf(stderr, USAGE);
        fprintf(stderr, "Invalid value for m or k (%d, %d)\n", m, k);
        return EXIT_FAILURE;
    }

    printf("using (%d x %d) tiles.\n", TILE_WIDTH, TILE_WIDTH);

    /********************************************/
    /* M is (m x k), P is (m x m)               */
    /********************************************/

    size_t bytesForM = m * k * sizeof(float);
    size_t bytesForP = m * m * sizeof(float);

    /* Allocate host memory for matrices */
    float *h_M, *h_P;
    float *result = new float[m*m];
    int *index = new int[m];
    h_M = (float *)malloc(bytesForM);
    h_P = (float *)malloc(bytesForP);
    if (h_M == NULL || h_P == NULL)
    {
        fprintf(stderr, "Unable to allocate host memory\n");
        return EXIT_FAILURE;
    }

    /* Allocate device memory for matrices */
    float *d_M, *d_P;
    int *d_index;
    CUDA_CHECK(cudaMalloc((void **)&d_M, bytesForM));
    CUDA_CHECK(cudaMalloc((void **)&d_P, bytesForP));
    CUDA_CHECK(cudaMalloc((void **)&d_index, m*sizeof(int)));

    /* Fill M (on host) */
    std::cout << "Loading matrices...\n";
    std::ifstream in1;
    in1.open("descriptor.txt");
    /* FIX: reading an unopened stream silently left h_M uninitialized. */
    if (!in1.is_open())
    {
        fprintf(stderr, "Unable to open descriptor.txt\n");
        return EXIT_FAILURE;
    }
    for (int i = 0; i < m*k; ++i)
        in1 >> h_M[i];
    in1.close();
    printf("M =\n");
    printMatrix(h_M, m, k);

    /* Copy M to device global memory */
    CUDA_CHECK(cudaMemcpy(d_M, h_M, bytesForM, cudaMemcpyHostToDevice));

    /* Launch the CUDA kernels */
    dim3 dimGrid((m+TILE_WIDTH-1)/TILE_WIDTH, (m+TILE_WIDTH-1)/TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    printf("matMul called from host\n");
    getDistance_gpu<<<dimGrid, dimBlock>>>(d_M, d_P, m, k);
    /* argMin_gpu only uses blockIdx.y, so a single column of blocks suffices
     * (the original 2D grid had every x-block redundantly redo each row). */
    dim3 argMinGrid(1, dimGrid.y);
    argMin_gpu<<<argMinGrid, dimBlock>>>(d_P, d_index, m, k);
    CUDA_CHECK(cudaDeviceSynchronize());

    /* Copy results from device global memory back to host memory */
    CUDA_CHECK(cudaMemcpy(h_P, d_P, bytesForP, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(index, d_index, m*sizeof(int), cudaMemcpyDeviceToHost));
    printf(" product received from host\n");
    printf("P =\n");
    printMatrix(h_P, m, m);

    /* For each of the first 100 rows, list the (1-based) columns whose
     * distance falls below 0.3. */
    std::ofstream out2;
    out2.open("matrix.txt");
    for (int i = 0; i < 100; i++){
        for (int j = 0; j < m; j++){
            if (h_P[i*m+j] < 0.3)
                out2 << j+1 << " ";
        }
        out2 << std::endl;
    }
    out2.close();

    /*
    for (int row = 0; row < m; row++) {
        for (int col = 0; col < m; col++) {
            getDistance(h_M, result, m, k, col, row);
        }
    }
    printf("\nExpected matrix:\n");
    printMatrix(result, m, m);
    printf("\n");
    for (int i = 0; i < m; i++){
        printf("%d ", index[i]);
    }
    printf("\n");
    */

    /* Free device global memory */
    CUDA_CHECK(cudaFree(d_M));
    CUDA_CHECK(cudaFree(d_P));
    CUDA_CHECK(cudaFree(d_index));

    /* Free host memory.
     * FIX: `index` and `result` were allocated with new[] but released with
     * free() — undefined behavior; they must be delete[]'d. */
    free(h_M);
    free(h_P);
    delete[] index;
    delete[] result;

    CUDA_CHECK(cudaDeviceReset());
    printf("Done\n");
    system("Pause");
    return EXIT_SUCCESS;
}
11,763
/*-- --*/
#include "../include/cooling_method.cuh"

// Geometric (exponential) cooling schedule: the temperature after NUM_ITER
// steps is the initial temperature scaled by rate^NUM_ITER.
float geometric_cooling(float init_st, int NUM_ITER, float rate){
    return powf(rate, NUM_ITER) * init_st;
}

// Hyperbolic cooling schedule: the temperature decays as
// init_st / sqrt(NUM_ITER + 1).
float hyperbolic_cooling(float init_st, int NUM_ITER){
    return init_st / sqrtf(NUM_ITER + 1);
}
11,764
//
// Created by daisy on 15.11.17.
//
#include "cuda.h"
#include "cuda_runtime.h"
#include "stdio.h"
#include "device_launch_parameters.h"
#include <stdlib.h>   // rand(), malloc()/free() — previously pulled in only transitively

#define TILE_WIDTH 2

//simple example
//1. N*N matrix for ALL M, N, P
//2. No shared memory, low efficiency
//
// Pd = Md * Nd for square, row-major (width x width) matrices.
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int width){
    //blockDim gives number of threads in each block in this direction
    float cValue=0.0;
    int row=blockIdx.y*blockDim.y+threadIdx.y;
    int col=blockIdx.x*blockDim.x+threadIdx.x;
    // FIX: the guard was `row > width || col > width`, which still lets
    // row == width (or col == width) through — an out-of-bounds access
    // whenever the rounded-up grid overshoots by exactly one thread.
    if (row >= width || col >= width) return;
    for (int i=0;i<width;i++){
        cValue+=(Md[row*width+i]*Nd[i*width+col]);
    }
    Pd[row*width+col]=cValue;
}

// Host wrapper: upload M and N, run the kernel, download P.
void MatrixMultiplication(float* M, float* N, float* P,int width){
    int size=width*width*sizeof(float);
    float * Md;
    float * Nd;
    float * Pd;
    cudaMalloc((void**) &Md,size);
    cudaMalloc((void**) &Nd,size);
    cudaMalloc((void**) &Pd,size);
    cudaMemcpy(Md,M,size,cudaMemcpyHostToDevice);
    cudaMemcpy(Nd,N,size,cudaMemcpyHostToDevice);
    // Ceil-division grid so widths not divisible by TILE_WIDTH are covered.
    dim3 dimGrid((width+TILE_WIDTH-1)/TILE_WIDTH,(width+TILE_WIDTH-1)/TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH,TILE_WIDTH);
    MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd,width);
    cudaMemcpy(P,Pd,size,cudaMemcpyDeviceToHost);
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
}

// Fill a width x width matrix with pseudo-random values.
void ReadMatrix(float* M,int width){
    int i=0;
    int j=0;
    for(i=0;i<width;i++){
        for(j=0;j<width;j++){
            M[i*width+j]=rand();
        }
    }
}

// Print a width x width matrix, one row per line.
void PrintMatrix(float*M,int width){
    int i;
    int j;
    for (i = 0; i < width ; i ++) {
        for (j = 0; j < width ; j ++) {
            // FIX: the separator was a garbled non-ASCII character (U+2423,
            // a visible-space glyph from a code listing); use a real space.
            printf ("%f ", M[i * width + j]);
        }
        printf ("\n");
    }
}

int main(void){
    float* M;
    float* N;
    float* P;
    int width=32;
    // scanf("%d",&width);
    M = ( float * ) malloc ( width * width * sizeof ( float ));
    N = ( float *) malloc ( width * width * sizeof ( float ));
    P = ( float *) malloc ( width * width * sizeof ( float ));
    ReadMatrix (M, width );
    ReadMatrix (N, width );
    MatrixMultiplication (M, N, P, width );
    PrintMatrix (M, width );
    printf ("\n");
    PrintMatrix (N, width );
    printf ("\n");
    PrintMatrix (P, width );
    free (M);
    free (N);
    free (P);
}
11,765
/*
 * a simple test
 */

// NOTE(review): `__shared__` on file-scope variables and on function
// parameters is not standard CUDA C++ usage — shared memory is normally
// declared inside a kernel (or passed as a plain pointer). This file reads
// like a compiler/analysis test fixture, so the qualifiers are left exactly
// as written rather than "fixed".
__shared__ float data1[1024];
__shared__ float data2[1024];
__shared__ float data3[1024];

// Writes 1.0 into d1 at index j+k+l each iteration; d2, d3 and idx are
// unused. NOTE(review): after the in-loop increments, j+k+l == 3*i + 3,
// which reaches 3072 on the last iteration — well past the 1024-element
// bound of d1. Out-of-bounds as written; presumably intentional for a
// test case, otherwise the loop bound needs fixing — TODO confirm intent.
__device__ void mult(__shared__ float d1[1024], __shared__ float d2[1024], __shared__ float d3[1024], int idx)
{
    int i;
    int j, k, l;

    j = -1;
    k = 0;
    l = 1;
    for (i = 0; i < 1024; i++) {
        j++;
        k++;
        l++;
        d1[j+k+l] = 1.0;
    }
}

// Kernel entry point: exercises mult() on the file-scope shared arrays;
// start and end are unused.
__global__ void doit(int start, int end)
{
    int i = 99;

    mult(data1, data2, data3, i);
}
11,766
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <ctime>

// Phase 1: histogram the keys into bucket[]. bucket must be zeroed before
// launch (done with cudaMemset in main); atomics make concurrent updates
// safe across the entire grid.
__global__ void countKeys( const int *key, int keylen, int *bucket ){
    int rank = blockIdx.x * blockDim.x + threadIdx.x;
    if( rank < keylen )
        atomicAdd( bucket + key[rank], 1 );
}

// Phase 2: rewrite key[] in sorted order from the completed histogram:
//   for j = 0 ~ b[0]-1,           key[j] = 0
//   for j = b[0] ~ b[0]+b[1]-1,   key[j] = 1
//   for j = b[0]+b[1] ~ ...,      key[j] = 2   etc.
__global__ void writeSorted( int *key, int keylen, const int *bucket ){
    int rank = blockIdx.x * blockDim.x + threadIdx.x;
    if( rank >= keylen ) return;

    int bIdx = -1, bItems = 0;
    do{
        bItems += bucket[ ++bIdx ];
    }while( bItems <= rank );
    key[rank] = bIdx;
}

int main()
{
    const int n = 50;
    const int m = 20;
    const int range = 5;

    int *key;
    cudaMallocManaged( &key, n*sizeof(int) );
    for (int i=0; i<n; i++) {
        key[i] = rand() % range;
    }

    int *bucket;
    cudaMallocManaged( &bucket, range*sizeof(int) );
    // FIX: zero the histogram before launch. The original kernel zeroed it
    // from the threads with rank < range (all in block 0) and then relied on
    // __syncthreads() — but that barrier is block-local, so other blocks
    // could atomicAdd into bucket[] before/while it was being initialized.
    cudaMemset( bucket, 0, range*sizeof(int) );

    clock_t before = clock();
    // FIX: split the single kernel into two launches. The original used
    // __syncthreads() between the counting and scatter phases, but blocks
    // are never synchronized with each other, so the do/while readback could
    // observe a partially built histogram. Same-stream launches execute in
    // order, which provides the required grid-wide ordering point.
    // (This also removes the early `return` for rank >= keylen that left the
    // last, partially-active block executing __syncthreads() with missing
    // threads — undefined behavior.)
    countKeys<<< (n+m-1)/m, m >>>( key, n, bucket );
    writeSorted<<< (n+m-1)/m, m >>>( key, n, bucket );
    cudaDeviceSynchronize();
    // FIX: take the end timestamp after synchronizing — kernel launches are
    // asynchronous, so the original measured only launch overhead.
    clock_t after = clock();
    (void)before;
    (void)after;
}
11,767
// std::system includes
#include <memory>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// CUDA-C includes
#include <cuda.h>
#include <cuda_runtime.h>
//#include <helper_cuda.h>

////////////////////////////////////////////////////////////////////////////////
// Program main
//
// Reads up to 629 "x y" integer vertex coordinates from the file named by
// argv[1], prints a few sanity-check entries, then enumerates the CUDA
// devices and their launch limits.
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
    printf("test\n");

    const int num_vert = 629;

    // FIX: validate the command line and the fopen() result — the original
    // dereferenced argv[1] and used `file` unchecked, segfaulting on a
    // missing argument or unreadable file.
    if (argc < 2)
    {
        fprintf(stderr, "usage: %s <vertex-file>\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    char const* const inputFile = argv[1];
    FILE* file = fopen(inputFile, "r");
    if (file == NULL)
    {
        fprintf(stderr, "error: cannot open %s\n", inputFile);
        exit(EXIT_FAILURE);
    }

    int vert_x[num_vert];
    int vert_y[num_vert];
    printf("test\n");

    char line[256];
    int i = 0;
    // FIX: also bound the loop by num_vert so an over-long input file cannot
    // overflow the coordinate arrays.
    while (i < num_vert && fgets(line, sizeof(line), file) != NULL)
    {
        sscanf(line, "%d %d", &vert_x[i], &vert_y[i]);
        ++i;
    }
    fclose(file);
    printf("test\n");

    // Sanity check for read
    // (NOTE(review): entries past the last line read stay uninitialized if
    //  the file holds fewer than num_vert lines.)
    srand(time(NULL));
    int rand_i = rand() % num_vert;
    printf("Coordinates on random index %d are (%d,%d)\n", rand_i, vert_x[rand_i], vert_y[rand_i]);
    printf("Coordinates on first index are (%d,%d)\n", vert_x[0], vert_y[0]);
    printf("Coordinates on last index are (%d,%d)\n", vert_x[num_vert - 1], vert_y[num_vert - 1]);

    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);

    if (error_id != cudaSuccess)
    {
        printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }

    // This function call returns 0 if there are no CUDA capable devices.
    if (deviceCount == 0)
    {
        printf("There are no available device(s) that support CUDA\n");
    }
    else
    {
        printf("Detected %d CUDA Capable device(s)\n", deviceCount);
    }

    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaSetDevice(dev);
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);
        printf("\nDevice %d: %s \n", dev, deviceProp.name);
        printf("\nMaxThreadsPerBlock: %d \n", deviceProp.maxThreadsPerBlock);
        printf("\nMaxThreadDim (%d,%d,%d) \n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
        printf("\nMaxGridSize (%d,%d,%d) \n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
    }

    // finish
    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    cudaDeviceReset();

    char ch;
    std::cin >> ch;
    exit(EXIT_SUCCESS);
}
11,768
#include <cuda.h>
#include <cuda_runtime.h>
#include "stdio.h"
#include <sys/time.h>

#define DEBUG 1

extern "C" {

// Cleaner error handling; wrap CUDA library calls with gpuErr(foo());
#define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Flatten an arbitrary 3D grid of 3D blocks into a single global thread index.
__device__ int get_tid()
{
    int blockId = blockIdx.x
                + blockIdx.y * gridDim.x
                + blockIdx.z * gridDim.x * gridDim.y;
    return blockId * (blockDim.x * blockDim.y * blockDim.z)
         + (threadIdx.z * (blockDim.x * blockDim.y))
         + (threadIdx.y * blockDim.x)
         + threadIdx.x;
}

// One thread per matrix element (any grid/block shape; get_tid() flattens it).
// n_elem is the number of elements resident on the device for this launch; all
// per-orbital arrays are laid out element-fastest with stride n_elem.
__global__ void dimer_1min1pls_t4(int n_elem, double *d_H, int *d_i, int *d_j, int *d_dim,
                                  float *d_sign, int *d_n_orb1, int *d_n_orb2,
                                  double *d_Rc1, double *d_Rcca1, double *d_Ra2,
                                  double *d_Rcaa2, double *d_h, double *d_V1, double *d_V2)
{
    const int tid = get_tid();
    if (tid < n_elem)
    {
        const int n_orb1 = d_n_orb1[tid];
        const int n_orb2 = d_n_orb2[tid];
        double tmp = 0.0;
        int p, q, r, s;

        // Top loop: decompose tid into (p,q,r,s) over n1 x n1 x n1 x n2.
        // FIX: the original mixed '*' into the divide-and-modulo decomposition
        // (e.g. "(tid - r - s) * (n_orb2 * n_orb1)"), which does not recover the
        // multi-dimensional indices; use the standard div/mod form.
        s = tid % n_orb2;
        r = (tid / n_orb2) % n_orb1;
        q = (tid / (n_orb2 * n_orb1)) % n_orb1;
        p = (tid / (n_orb2 * n_orb1 * n_orb1)) % n_orb1;
        tmp = d_V1[(((p * n_orb1 + q) * n_orb1 + r) * n_orb2 + s) * n_elem + tid]
            * d_Rcca1[((q * n_orb1 + p) * n_orb1 + r) * n_elem + tid]
            * d_Ra2[s * n_elem + tid];

        // Middle loop: decompose tid over n1 x n2 x n2 x n2 (same FIX as above).
        s = tid % n_orb2;
        r = (tid / n_orb2) % n_orb2;
        q = (tid / (n_orb2 * n_orb2)) % n_orb2;
        p = (tid / (n_orb2 * n_orb2 * n_orb2)) % n_orb1;
        tmp += d_V2[(((p * n_orb2 + q) * n_orb2 + r) * n_orb2 + s) * n_elem + tid]
             * d_Rcaa2[((q * n_orb2 + s) * n_orb2 + r) * n_elem + tid]
             * d_Rc1[p * n_elem + tid];
        tmp *= 2.0;

        // Bottom loop: one-body term over (r, s).
        // FIX: row-major pair index is r*n_orb2 + s; the original multiplied
        // ("r * n_orb2 * s"), which collapses to 0 whenever s == 0.
        s = tid % n_orb2;
        r = (tid / n_orb2) % n_orb1;
        tmp += d_h[(r * n_orb2 + s) * n_elem + tid]
             * d_Rc1[r * n_elem + tid]
             * d_Ra2[s * n_elem + tid];

        tmp *= d_sign[tid];

        // Scatter into the Hamiltonian at (i, j) of a dim x dim matrix.
        int index = d_i[tid] * d_dim[tid] + d_j[tid];
        d_H[index * n_elem + tid] = tmp;
    }
}

// Host driver: chunk the element batch so the working set fits on the device,
// and run the kernel once per chunk.
// NOTE(review): the double** parameters are indexed throughout as FLAT double
// arrays (pointer arithmetic in element units) — this assumes the callers pass
// contiguous flat buffers; confirm at the call site.
void dimer_1min1pls_loop(int n_elem, double** H, int* i, int* j, int* dim, float* sign,
                         int* n_orb1, int* n_orb2, double** Rc1, double** Rcca1,
                         double** Ra2, double** Rcaa2, double** h, double** V1, double** V2)
{
    struct timeval start, stop;
    gettimeofday(&start, 0);

    double *d_H, *d_Rc1, *d_Rcca1, *d_Ra2, *d_Rcaa2, *d_h, *d_V1, *d_V2;
    int *d_i, *d_j, *d_n_orb1, *d_n_orb2, *d_dim;
    float *d_sign;

    const int n1 = n_orb1[0];
    const int n2 = n_orb2[0];

    // Full (all-chunk) lengths of each flattened array.
    const int V1_len    = n_elem * n2 * n1 * n1 * n1;
    const int V2_len    = n_elem * n2 * n2 * n2 * n1;
    const int Rc1_len   = n_elem * n1;
    const int Ra2_len   = n_elem * n2;
    const int Rcca1_len = n_elem * n1 * n1 * n1;
    const int Rcaa2_len = n_elem * n2 * n2 * n2;
    const int h_len     = n_elem * n1 * n2;
    const int H_len     = n_elem * n1 * n1 * n1 * n2; // could be an issue if n1 != n2

    // For maximum thread utilization this should divide H_len evenly.
    const int chunks = n1 * n2;

    // FIX: ceil() applied to an already-truncated integer division was a no-op;
    // use integer ceil-division instead.
    const int elemPerChunk    = (n_elem + chunks - 1) / chunks;
    const int threadsPerChunk = (H_len  + chunks - 1) / chunks;

    // Per-chunk lengths. FIX: the original offset the host pointers by the FULL
    // array length each iteration, reading far out of bounds for every k >= 1.
    const int V1_chunk    = (V1_len    + chunks - 1) / chunks;
    const int V2_chunk    = (V2_len    + chunks - 1) / chunks;
    const int Rc1_chunk   = (Rc1_len   + chunks - 1) / chunks;
    const int Ra2_chunk   = (Ra2_len   + chunks - 1) / chunks;
    const int Rcca1_chunk = (Rcca1_len + chunks - 1) / chunks;
    const int Rcaa2_chunk = (Rcaa2_len + chunks - 1) / chunks;
    const int h_chunk     = (h_len     + chunks - 1) / chunks;

    const dim3 dimblock(4, 4, 4); // 64 threads per block
    // FIX: the original grid was cubic in threadsPerChunk, launching roughly
    // threadsPerChunk^3 / 64 threads; a 1-D grid of ceil(tpc/64) blocks covers
    // one chunk exactly.
    const dim3 dimgrid((threadsPerChunk + 63) / 64, 1, 1);

    if (DEBUG)
    {
        printf("Chunks: %d\tThreads per chunk: %d\n", chunks, threadsPerChunk);
        printf("dimblock:\t<%d\t%d\t%d>\ndimgrid:\t<%d\t%d\t%d>\n",
               dimblock.x, dimblock.y, dimblock.z, dimgrid.x, dimgrid.y, dimgrid.z);
    }

    const int V1_size    = sizeof(double) * V1_chunk;
    const int V2_size    = sizeof(double) * V2_chunk;
    const int Rc1_size   = sizeof(double) * Rc1_chunk;
    const int Ra2_size   = sizeof(double) * Ra2_chunk;
    const int Rcca1_size = sizeof(double) * Rcca1_chunk;
    const int Rcaa2_size = sizeof(double) * Rcaa2_chunk;
    const int h_size     = sizeof(double) * h_chunk;
    const int i_size     = sizeof(int)    * elemPerChunk; // for all the n_elem-long int arrays
    const int sign_size  = sizeof(float)  * elemPerChunk;
    const int H_size     = sizeof(double) * threadsPerChunk;

    if (DEBUG) { printf("Starting device memory allocation\n"); }
    gpuErr(cudaMalloc((void **) &d_H, H_size));
    gpuErr(cudaMalloc((void **) &d_Rc1, Rc1_size));
    gpuErr(cudaMalloc((void **) &d_V1, V1_size));
    gpuErr(cudaMalloc((void **) &d_V2, V2_size));
    gpuErr(cudaMalloc((void **) &d_Ra2, Ra2_size));
    gpuErr(cudaMalloc((void **) &d_Rcca1, Rcca1_size));
    gpuErr(cudaMalloc((void **) &d_Rcaa2, Rcaa2_size));
    gpuErr(cudaMalloc((void **) &d_h, h_size));
    gpuErr(cudaMalloc((void **) &d_i, i_size));
    gpuErr(cudaMalloc((void **) &d_j, i_size));
    gpuErr(cudaMalloc((void **) &d_dim, i_size));
    gpuErr(cudaMalloc((void **) &d_n_orb1, i_size));
    gpuErr(cudaMalloc((void **) &d_n_orb2, i_size));
    gpuErr(cudaMalloc((void **) &d_sign, sign_size));
    if (DEBUG) { printf("Finished device memory allocation\n"); }

    for (int k = 0; k < chunks; k++)
    {
        if (DEBUG) { printf("Chunk %d: starting host to device memcpys\n", k); }
        gpuErr(cudaMemcpy(d_Rc1,   (double*)Rc1   + k * Rc1_chunk,   Rc1_size,   cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_V1,    (double*)V1    + k * V1_chunk,    V1_size,    cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_V2,    (double*)V2    + k * V2_chunk,    V2_size,    cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_Ra2,   (double*)Ra2   + k * Ra2_chunk,   Ra2_size,   cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_Rcca1, (double*)Rcca1 + k * Rcca1_chunk, Rcca1_size, cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_Rcaa2, (double*)Rcaa2 + k * Rcaa2_chunk, Rcaa2_size, cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_h,     (double*)h     + k * h_chunk,     h_size,     cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_i,      i      + k * elemPerChunk, i_size,    cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_j,      j      + k * elemPerChunk, i_size,    cudaMemcpyHostToDevice));
        // FIX: d_dim was allocated but never copied — the kernel read
        // uninitialized device memory for the matrix dimension.
        gpuErr(cudaMemcpy(d_dim,    dim    + k * elemPerChunk, i_size,    cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_n_orb1, n_orb1 + k * elemPerChunk, i_size,    cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_n_orb2, n_orb2 + k * elemPerChunk, i_size,    cudaMemcpyHostToDevice));
        gpuErr(cudaMemcpy(d_sign,   sign   + k * elemPerChunk, sign_size, cudaMemcpyHostToDevice));
        if (DEBUG)
        {
            printf("Chunk %d: finished host to device memcpys\n", k);
            printf("Chunk %d: launching dimer_1min1pls_t4 kernel\n", k);
        }

        // FIX: the launch configuration order is <<<grid, block>>>; the original
        // passed the block config as the grid and vice versa.
        // NOTE(review): the per-chunk element count is passed so the in-kernel
        // bound check and strides match the data actually resident on the
        // device (the original passed the full n_elem, indexing past the chunk
        // buffers) — confirm the intended chunk layout.
        dimer_1min1pls_t4<<<dimgrid, dimblock>>>(elemPerChunk, d_H, d_i, d_j, d_dim, d_sign,
                                                 d_n_orb1, d_n_orb2, d_Rc1, d_Rcca1, d_Ra2,
                                                 d_Rcaa2, d_h, d_V1, d_V2);
        gpuErr(cudaPeekAtLastError());
        gpuErr(cudaDeviceSynchronize());

        // FIX: the original used host memcpy() on a device pointer, which is
        // invalid; a device-to-host cudaMemcpy is required.
        gpuErr(cudaMemcpy((double*)H + k * threadsPerChunk, d_H, H_size, cudaMemcpyDeviceToHost));
        if (DEBUG) { printf("Chunk %d: finished executing dimer_1min1pls_t4 kernel \n", k); }
    }

    // FIX: d_H was never freed (device memory leak).
    gpuErr(cudaFree(d_H));
    gpuErr(cudaFree(d_Rc1));
    gpuErr(cudaFree(d_V1));
    gpuErr(cudaFree(d_V2));
    gpuErr(cudaFree(d_Ra2));
    gpuErr(cudaFree(d_Rcca1));
    gpuErr(cudaFree(d_Rcaa2));
    gpuErr(cudaFree(d_h));
    gpuErr(cudaFree(d_i));
    gpuErr(cudaFree(d_j));
    gpuErr(cudaFree(d_dim));
    gpuErr(cudaFree(d_n_orb1));
    gpuErr(cudaFree(d_n_orb2));
    gpuErr(cudaFree(d_sign));

    gettimeofday(&stop, 0);
    if (DEBUG)
    {
        double t = (double)(stop.tv_sec - start.tv_sec) * 1000
                 + (double)(stop.tv_usec - start.tv_usec) / 1000;
        printf("dimer_1min1pls_loop_t4 finished in %f ms\n", t);
    }
}
}
11,769
#include <cmath>

// Replace each element of `value` with its real cube root, one thread per
// element (thread i handles value[i]; launch with blockDim.x == array length).
// FIX: pow(x, 1./3) is NaN for negative x (non-integer exponent, negative
// base) and loses accuracy from the inexact exponent; cbrt() is defined for
// all reals and computes the cube root directly.
__global__ void cbrt_pow(double* value)
{
    value[threadIdx.x] = cbrt(value[threadIdx.x]);
}
11,770
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <algorithm>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include <time.h>
#include <stdint.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

using namespace std;

// Tree reduction by multiplication. When *first == 1 each block first folds the
// binary presence mask into its 2*blockDim.x-wide slice of vectorIN (using p
// when the word occurs, 1-p when it does not), then reduces the slice; when
// *first == 0 it performs a plain product reduction. Per-block products land in
// d_out[blockIdx.x].
__global__ void gpu_multiplication_reduce(int *first, int *subarrayN, int *N, double* vectorIN, bool* vectorBIN, double *d_out)
{
    // Each block owns a 2*blockDim.x-wide slice, hence the doubled block offset.
    size_t index = threadIdx.x + blockIdx.x * blockDim.x + (blockDim.x * blockIdx.x);
    int tid = threadIdx.x;
    int dim = *N;
    int offset = *subarrayN / 2;
    if (*first == 1)
    {
        if (tid < offset && index + offset < dim)
        {
            if (vectorBIN[index])
            {
                if (vectorBIN[index + offset]) { vectorIN[index] *= vectorIN[index + offset]; }
                else                           { vectorIN[index] *= (1 - vectorIN[index + offset]); }
            }
            else if (vectorBIN[index + offset])
            {
                vectorIN[index] = (1 - vectorIN[index]) * vectorIN[index + offset];
            }
            else
            {
                vectorIN[index] = (1 - vectorIN[index]) * (1 - vectorIN[index + offset]);
            }
        }
        // FIX: the halving must happen for EVERY thread, not just those that
        // took the branch above; otherwise threads enter the loop below with
        // different trip counts and reach __syncthreads() divergently
        // (undefined behavior).
        offset = offset / 2;
    }
    __syncthreads();
    for (int i = offset; i > 0; i >>= 1)
    {
        if (tid < i && index + i < dim)
        {
            vectorIN[index] *= vectorIN[index + i];
        }
        __syncthreads();
    }
    if (tid == 0)
    {
        d_out[blockIdx.x] = vectorIN[index];
        printf("total is %f\n", d_out[blockIdx.x]);
    }
}

fstream spam_file;
fstream nonspam_file;
vector<string> words;     // vocabulary collected from both training files
vector<int> spam_occ;     // per-word occurrence counts in the spam corpus
vector<int> nonspam_occ;  // per-word occurrence counts in the non-spam corpus
double * spam_vector;     // per-word P(word|spam)
double * nonspam_vector;  // per-word P(word|nonspam)
bool * binary_vector;     // per-word: does the word occur in the test message?
int spam_length;          // m: number of spam training lines
int nonspam_length;       // n: number of non-spam training lines

// Debug dump of the vocabulary and raw occurrence counts.
void print_words_and_occurencies()
{
    for (size_t i = 0; i < words.size(); i++)
    {
        cout << words[i] << " " << spam_occ[i] << " " << nonspam_occ[i] << endl;
    }
}

// Debug dump of the per-word conditional probability vectors.
void print_vectors()
{
    cout << "spam_length: " << spam_length << ", nonspamlength: " << nonspam_length << endl;
    cout << "word_vector" << " " << "spam_vector" << " " << "nonspam_vector" << endl;
    for (size_t i = 0; i < words.size(); i++)
    {
        cout << words[i] << " " << spam_vector[i] << " " << nonspam_vector[i] << endl;
    }
}

// Convert the raw occurrence counts into P(word|class) estimates.
void finish_vectors()
{
    // FIX: re-initialization (menu option 3 chosen twice) leaked the previous
    // buffers; delete[] on null is safe, globals start null.
    delete [] spam_vector;
    delete [] nonspam_vector;
    spam_vector = new double[words.size()];
    nonspam_vector = new double[words.size()];
    for (size_t i = 0; i < words.size(); i++)
    {
        spam_vector[i] = (double) spam_occ[i] / (double) spam_length;
        nonspam_vector[i] = (double) nonspam_occ[i] / (double) nonspam_length;
    }
}

// Count the number of training lines in each file; leaves both streams rewound.
void count_occurencies_length()
{
    string line;
    int counter = -1; // -1 compensates for the extra getline() at EOF
    while (!spam_file.eof()) { counter++; getline(spam_file, line); }
    spam_length = counter;
    counter = -1;
    while (!nonspam_file.eof()) { counter++; getline(nonspam_file, line); }
    nonspam_length = counter;
    spam_file.clear();
    spam_file.seekg(0, ios::beg);
    nonspam_file.clear();
    nonspam_file.seekg(0, ios::beg);
}

// Interactively append spam training lines until the user types "quit".
void add_spam_training_examples()
{
    cout << "Add spam examples to the database (\"quit\" to exit function.)" << endl;
    string spam_example = "";
    cin.ignore();
    do
    {
        getline(cin, spam_example);
        if (spam_example.compare("quit")) { spam_file << spam_example << endl; }
        else { break; }
    } while (spam_example.compare("quit"));
    spam_file.clear();
    spam_file.seekg(0, ios::beg);
    nonspam_file.clear();
    nonspam_file.seekg(0, ios::beg);
}

// Interactively append non-spam training lines until the user types "quit".
void add_nonspam_training_examples()
{
    cout << "Add nonspam examples to the database (\"quit\" to exit function.)" << endl;
    string nonspam_example = "";
    cin.ignore();
    do
    {
        getline(cin, nonspam_example);
        if (nonspam_example.compare("quit")) { nonspam_file << nonspam_example << endl; }
        else { break; }
    } while (nonspam_example.compare("quit"));
    spam_file.clear();
    spam_file.seekg(0, ios::beg);
    nonspam_file.clear();
    nonspam_file.seekg(0, ios::beg);
}

// Tokenize both training files, building the vocabulary and the per-class
// occurrence counts, then compute the probability vectors.
void create_words_vector()
{
    words.clear();
    // FIX: the occurrence vectors were not cleared, so re-initializing mixed
    // stale counts with the new vocabulary.
    spam_occ.clear();
    nonspam_occ.clear();
    count_occurencies_length();
    string word;
    string line;
    string separator = " ";
    size_t position;
    // spam_file -> words vector
    while (!spam_file.eof())
    {
        getline(spam_file, line);
        // FIX: "line.compare(\"\") == 1" is not a reliable non-empty test —
        // compare() returns an unspecified positive value (the length
        // difference in libstdc++), so multi-character lines were skipped.
        while (!line.empty())
        {
            position = line.find(separator);
            word = line.substr(0, position);
            line.erase(0, position + 1);
            if (find(words.begin(), words.end(), word) != words.end())
            {
                /* words contains word */
                int pos = distance(words.begin(), find(words.begin(), words.end(), word));
                spam_occ[pos] += 1;
            }
            else
            {
                /* words does not contain word */
                words.push_back(word);
                spam_occ.push_back(1);
            }
            if (position == string::npos) { break; }
        }
    }
    for (size_t i = 0; i < words.size(); i++) { nonspam_occ.push_back(0); }
    // nonspam_file -> words vector
    while (!nonspam_file.eof())
    {
        getline(nonspam_file, line);
        while (!line.empty())
        {
            position = line.find(separator);
            word = line.substr(0, position);
            line.erase(0, position + 1);
            if (find(words.begin(), words.end(), word) != words.end())
            {
                /* words contains word */
                int pos = distance(words.begin(), find(words.begin(), words.end(), word));
                nonspam_occ[pos] += 1;
            }
            else
            {
                /* words does not contain word */
                words.push_back(word);
                nonspam_occ.push_back(1);
                spam_occ.push_back(0);
            }
            if (position == string::npos) { break; }
        }
    }
    spam_file.clear();
    spam_file.seekg(0, ios::beg);
    nonspam_file.clear();
    nonspam_file.seekg(0, ios::beg);
    finish_vectors();
}

// Classify the current binary_vector on the CPU and print the posteriors.
void run_cpu(int word_size)
{
    double p_test_spam = 1.0;
    double p_test_nonspam = 1.0;
    double time = clock();
    for (int i = 0; i < word_size; i++)
    {
        if (binary_vector[i])
        {
            p_test_spam *= spam_vector[i];
            p_test_nonspam *= nonspam_vector[i];
        }
        else
        {
            p_test_spam *= (1.0 - spam_vector[i]);
            p_test_nonspam *= (1.0 - nonspam_vector[i]);
        }
    }
    cout << "CPU time is " << clock() - time << endl;
    cout << "total (spam) = " << p_test_spam << endl;
    cout << "total (nonspam) = " << p_test_nonspam << endl;
    double p_spam = (double) spam_length / (double) (spam_length + nonspam_length);
    double p_nonspam = (double) nonspam_length / (double) (spam_length + nonspam_length);
    // Bayes rule: posterior = likelihood * prior / evidence.
    double p_spam_test = (p_test_spam * p_spam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam);
    double p_nonspam_test = (p_test_nonspam * p_nonspam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam);
    cout << "P(spam) = " << p_spam << endl;
    cout << "P(nonspam) = " << p_nonspam << endl;
    cout << "P(test|spam) = " << p_test_spam << endl;
    cout << "P(test|nonspam) = " << p_test_nonspam << endl;
    cout << "P(spam|test) = " << p_spam_test << endl;
    cout << "P(nonspam|test) = " << p_nonspam_test << endl;
    cout << "word count is" << words.size() << endl;
    // FIX: the decision must compare the two POSTERIORS; the original compared
    // the spam posterior against the non-spam PRIOR.
    if (p_spam_test > p_nonspam_test) { cout << "Message is a spam." << endl; }
    else { cout << "Message is not a spam." << endl; }
}

// Classify the current binary_vector on the GPU (two-pass product reduction)
// and print the posteriors.
void run_gpu(int word_size)
{
    double p_test_spam = 1.0;
    double p_test_nonspam = 1.0;
    // ------------------------------- CUDA -----------------------
    int dim = word_size;
    int blockSize = 1024;
    size_t max_elems_per_block = blockSize * 2; // binary-tree reduction: 2 elems/thread
    size_t blocks = 0;
    if (dim <= max_elems_per_block)
    {
        blocks = (size_t) ceil(float(dim) / float(max_elems_per_block));
    }
    else
    {
        blocks = dim / max_elems_per_block;
        if (dim % max_elems_per_block != 0) blocks++;
    }
    cout << "Blocks: " << blocks << endl;
    size_t double_array_dim = dim * sizeof(double);
    size_t bool_array_dim = dim * sizeof(bool);
    size_t out_dim = blocks * sizeof(double);
    double *dev_array;
    bool *dev_binary;
    double *dev_out;
    double *dev_total;
    int *first;
    int *subN;
    int *N;
    cudaMalloc(&dev_array, double_array_dim);
    cudaMalloc(&dev_binary, bool_array_dim);
    cudaMalloc(&dev_out, out_dim);
    cudaMallocManaged(&dev_total, sizeof(double));
    cudaMallocManaged(&subN, sizeof(int));
    cudaMallocManaged(&N, sizeof(int));
    cudaMallocManaged(&first, sizeof(int));
    *dev_total = 1.0;
    *first = 1;
    *N = dim;
    *subN = dim / blocks;
    cout << "Subarray count is " << *subN << endl;
    // NONSPAM_VECTOR, HOST ----> DEVICE
    if (cudaMemcpy(dev_array, nonspam_vector, double_array_dim, cudaMemcpyHostToDevice) != cudaSuccess)
    {
        cout << "It could not be copied! 1" << endl;
        return;
    }
    if (cudaMemcpy(dev_binary, binary_vector, bool_array_dim, cudaMemcpyHostToDevice) != cudaSuccess)
    {
        cout << "It could not be copied! 2" << endl;
        return;
    }
    cout << "Nonspam vector: " << endl;
    // Pass 1: per-block partial products (mask folded in).
    gpu_multiplication_reduce <<<blocks, blockSize >>>(first, subN, N, dev_array, dev_binary, dev_out);
    cudaDeviceSynchronize();
    // Pass 2: reduce the per-block partials (mask not consulted, *first == 0).
    *first = 0;
    *subN = blocks;
    gpu_multiplication_reduce <<< 1, blocks >>>(first, subN, N, dev_out, dev_binary, dev_total);
    cudaDeviceSynchronize();
    p_test_nonspam = dev_total[0]; // result is the first element
    // SPAM_VECTOR, HOST ----> DEVICE
    if (cudaMemcpy(dev_array, spam_vector, double_array_dim, cudaMemcpyHostToDevice) != cudaSuccess)
    {
        cout << "It could not be copied! 4" << endl;
        return;
    }
    *first = 1;
    cout << "Spam vector: " << endl;
    *subN = dim / blocks;
    gpu_multiplication_reduce <<<blocks, blockSize >>>(first, subN, N, dev_array, dev_binary, dev_out);
    cudaDeviceSynchronize();
    *first = 0;
    *subN = blocks;
    gpu_multiplication_reduce <<< 1, blocks >>>(first, subN, N, dev_out, dev_binary, dev_total);
    cudaDeviceSynchronize();
    p_test_spam = dev_total[0];
    cudaFree(dev_array);
    cudaFree(dev_binary);
    cudaFree(dev_out);
    cudaFree(dev_total);
    cudaFree(first);
    // FIX: subN and N were never freed (device memory leak).
    cudaFree(subN);
    cudaFree(N);
    // -------------------------------- CUDA END ------------------
    double p_spam = (double)spam_length / (double)(spam_length + nonspam_length);
    double p_nonspam = (double)nonspam_length / (double)(spam_length + nonspam_length);
    double p_spam_test = (p_test_spam * p_spam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam);
    double p_nonspam_test = (p_test_nonspam * p_nonspam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam);
    cout << "P(spam) = " << p_spam << endl;
    cout << "P(nonspam) = " << p_nonspam << endl;
    cout << "P(test|spam) = " << p_test_spam << endl;
    cout << "P(test|nonspam) = " << p_test_nonspam << endl;
    cout << "P(spam|test) = " << p_spam_test << endl;
    cout << "P(nonspam|test) = " << p_nonspam_test << endl;
    // FIX: compare the two posteriors (was posterior vs. prior).
    if (p_spam_test > p_nonspam_test) { cout << "Message is a spam." << endl; }
    else { cout << "Message is not a spam." << endl; }
}

// Read a test message, build binary_vector over the vocabulary, and classify
// it on either the CPU or the GPU.
void run_naive_bayes(bool gpu, int word_size)
{
    // FIX: each run leaked the previous binary_vector.
    delete [] binary_vector;
    binary_vector = new bool[words.size()];
    for (size_t i = 0; i < words.size(); i++) binary_vector[i] = false;
    string example;
    string word;
    string separator = " ";
    size_t position;
    cout << "Input test sample: ";
    cin.ignore();
    getline(cin, example);
    int known_words = 0;
    int unknown_words = 0;
    // FIX: same unreliable compare("") == 1 non-empty test as above.
    while (!example.empty())
    {
        position = example.find(separator);
        word = example.substr(0, position);
        example.erase(0, position + 1);
        if (find(words.begin(), words.end(), word) != words.end())
        {
            /* words contains word */
            int pos = distance(words.begin(), find(words.begin(), words.end(), word));
            binary_vector[pos] = true;
            known_words++;
        }
        else
        {
            /* words does not contain word */
            unknown_words++;
        }
        if (position == string::npos) { break; }
    }
    cout << "known_words: " << known_words << ", unkwnown_words: " << unknown_words << endl;
    if (gpu) { run_gpu(word_size); }
    else { run_cpu(word_size); }
}

// Interactive menu loop driving the classifier.
int main()
{
    bool perform = true;
    bool ready = false; // option 3 must run before 4/5
    spam_file.open("spam_data.txt", ios::in | ios::out | ios::app);
    nonspam_file.open("nonspam_data.txt", ios::in | ios::out | ios::app);
    if (spam_file.good() == false)
    {
        cout << "Couldnt open spam_file.";
        getchar();
        return 0;
    }
    if (nonspam_file.good() == false)
    {
        cout << "Couldnt open nonspam_file.";
        getchar();
        return 0;
    }
    cout << "Naive Bayes Classifier as a anti-spam filter" << endl;
    int option;
    int word_size;
    while (perform)
    {
        cout << endl << " ---- Choose action (number): ---- " << endl;
        cout << "1. Add spam training examples. " << endl;
        cout << "2. Add non spam training examples. " << endl;
        cout << "3. Init Naive Bayes Algorithm." << endl;
        cout << "4. Run Naive Bayes Classifier (GPU)." << endl;
        cout << "5. Run Naive Bayes Classifier (CPU)." << endl;
        cout << "6. Print words and their occurencies." << endl;
        cout << "7. Print words and vectors" << endl;
        cout << "8. Exit." << endl;
        cin >> option;
        switch (option)
        {
        case 1:
            add_spam_training_examples();
            ready = false;
            break;
        case 2:
            add_nonspam_training_examples();
            ready = false;
            break;
        case 3:
            create_words_vector();
            ready = true;
            break;
        case 4:
            if (ready)
            {
                cout << "Chose the word size:" << endl;
                cin >> word_size;
                run_naive_bayes(true, word_size);
            }
            else { cout << "Naive Bayes not initialized!" << endl; }
            break;
        case 5:
            if (ready)
            {
                cout << "Chose the word size:" << endl;
                cin >> word_size;
                run_naive_bayes(false, word_size);
            }
            break;
        case 6:
            print_words_and_occurencies();
            break;
        case 7:
            print_vectors();
            break;
        case 8:
            perform = false;
            break;
        default:
            cout << "Wrong!" << endl;
        }
    }
    spam_file.close();
    nonspam_file.close();
    delete [] spam_vector;
    delete [] nonspam_vector;
    delete [] binary_vector; // FIX: was never released
    return 0;
}
11,771
#include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>

// Single-thread kernel: writes the sum of the two device scalars into *c.
__global__ void add(int *a, int *b, int *c)
{
    c[0] = a[0] + b[0];
}

// Minimal CUDA round trip: copy two ints up, add them on the device with a
// single thread, copy the sum back, and print it.
int main()
{
    const int size = sizeof(int);
    int host_a = 3;
    int host_b = 5;
    int host_result;

    int *dev_a, *dev_b, *dev_result;
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_result, size);

    cudaMemcpy(dev_a, &host_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &host_b, size, cudaMemcpyHostToDevice);

    // One block, one thread — the kernel touches exactly one element.
    add<<<1, 1>>>(dev_a, dev_b, dev_result);

    // The blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(&host_result, dev_result, size, cudaMemcpyDeviceToHost);
    printf("Result: %d\n", host_result);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_result);
}
11,772
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>

typedef unsigned int uint;
using namespace std;

// C[m,k] = A[m,n] * B[n,k]
// All matrices are row-major: element (row, col) of an r x c matrix lives at
// index row * c + col.

// Print at most the first 25 elements of a matrix (debug aid).
void print_matrix(float *matrix, uint size)
{
    uint how_many_elements = 25;
    if (size < 25) { how_many_elements = size; }
    for (uint i = 0; i < how_many_elements; i++)
    {
        printf(" %f", matrix[i]);
    }
    printf("\n");
}

// Reference CPU matmul. NOTE: accumulates into c, so c must be zeroed first.
void host_matmul(float *a, float *b, float *c, uint m, uint n, uint k)
{
    for (uint i = 0; i < m; i++)            // row of C
    {
        for (uint j = 0; j < k; j++)        // column of C
        {
            for (uint t = 0; t < n; t++)    // inner dimension
            {
                // c(i, j) += a(i, t) * b(t, j)
                c[i * k + j] += a[i * n + t] * b[t * k + j];
            }
        }
    }
}

// Computes one row of C: thread i writes c[i] = dot(a, column i of b).
// a: one row of A (n floats), b: all of B (n x k, row-major), c: one row of C
// (k floats). Threads with i >= k do nothing.
__global__ void kernel_mathmul(float * a, float * b, float * c, uint k, uint n)
{
    uint i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < k)
    {
        c[i] = 0; // reset first: the buffer is reused for every row
        for (uint t = 0; t < n; t++)
        {
            c[i] += a[t] * b[i + t * k];
        }
    }
}

// GPU matmul, computing the result one row at a time: B stays resident on the
// device while each row of A is shipped up and each row of C shipped back.
void dev_matmul(float *a, float *b, float *c, uint m, uint n, uint k)
{
    int threads_per_block = 256;
    // FIX: the kernel produces k outputs per row, so the grid must cover k
    // (the original sized it by m, which only worked because m == k here).
    int blocks = (int)ceil((float)k / threads_per_block);

    float * dev_a_onerow;
    cudaMalloc(&dev_a_onerow, n * sizeof(float));
    float * dev_c_onerow;
    cudaMalloc(&dev_c_onerow, k * sizeof(float));
    // FIX: B is n x k, not m x n (the sizes only coincided because the demo
    // uses square matrices).
    float * dev_b;
    cudaMalloc(&dev_b, n * k * sizeof(float));
    cudaMemcpy(dev_b, b, n * k * sizeof(float), cudaMemcpyHostToDevice);

    // Compute the rows of the resulting matrix one line at a time.
    for (uint i = 0; i < m; i++)
    {
        // copy one row of the a matrix to the device
        cudaMemcpy(dev_a_onerow, a + (i * n), n * sizeof(float), cudaMemcpyHostToDevice);
        kernel_mathmul<<<blocks, threads_per_block>>>(dev_a_onerow, dev_b, dev_c_onerow, k, n);
        // FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
        cudaDeviceSynchronize();
        // copy the resulting row back
        cudaMemcpy(c + i * k, dev_c_onerow, k * sizeof(float), cudaMemcpyDeviceToHost);
    }
    cudaFree(dev_a_onerow);
    cudaFree(dev_b);
    cudaFree(dev_c_onerow);
}

// Fill a matrix with uniform random values in [0, 1].
void init_matrix(float *matrix, uint size)
{
    for (uint i = 0; i < size; i++)
    {
        matrix[i] = ((float) rand()) / RAND_MAX;
    }
}

// Zero a matrix (host_matmul accumulates, so this is required before use).
void clear_matrix(float *matrix, uint size)
{
    for (uint i = 0; i < size; i++)
    {
        matrix[i] = 0.0;
    }
}

// Assert element-wise closeness of two matrices.
// FIX: exact float equality between CPU and GPU results is unreliable (fused
// multiply-add contraction on the device changes rounding); compare with a
// small relative tolerance instead.
void verify_matrix(const float *matrix1, const float *matrix2, const uint size)
{
    for (uint i = 0; i < size; i++)
    {
        float scale = fabsf(matrix2[i]) > 1.0f ? fabsf(matrix2[i]) : 1.0f;
        assert(fabsf(matrix1[i] - matrix2[i]) <= 1e-3f * scale);
    }
}

int main()
{
    uint m = 200;
    uint n = 200;
    uint k = 200;
    srand(time(NULL));
    float * a_matrix = new float[m * n];
    float * b_matrix = new float[n * k];
    float * c_matrix = new float[m * k];
    float * c_dev_matrix = new float[m * k];
    clock_t start;
    clock_t end;

    printf("Intialize random matrices: ");
    init_matrix(a_matrix, m * n);
    init_matrix(b_matrix, n * k);
    printf("Done. \n");
    print_matrix(a_matrix, m * n);
    print_matrix(b_matrix, n * k);
    print_matrix(c_matrix, m * k);

    printf("Do host-calculation: ");
    clear_matrix(c_matrix, m * k);
    start = clock();
    host_matmul(a_matrix, b_matrix, c_matrix, m, n, k);
    end = clock();
    // FIX: clock_t difference printed with %d invoked undefined behavior where
    // clock_t is not int; cast to long and use %ld.
    printf("Took %ld clock cycles\n", (long)(end - start));
    printf("Here comes the first 25 elements of c:\n");
    print_matrix(c_matrix, m * k);

    printf("Do device-calculation: ");
    start = clock();
    dev_matmul(a_matrix, b_matrix, c_dev_matrix, m, n, k);
    end = clock();
    printf("Took %ld clock cycles\n", (long)(end - start));
    printf("Here comes the first 25 elements of c on device:\n");
    print_matrix(c_dev_matrix, m * k);

    printf("Checking if GPU result is correct: ");
    verify_matrix(c_matrix, c_dev_matrix, m * k);
    printf("It is.");

    // FIX: arrays allocated with new[] must be released with delete[].
    delete [] a_matrix;
    delete [] b_matrix;
    delete [] c_matrix;
    delete [] c_dev_matrix;
}
11,773
// from https://www.programmersought.com/article/58352347721/
// super-clean formula
// NOTE(review): these are studio-swing style coefficients; RGB2V's G/B weights
// (-0.399/-0.040) and YUV2R's V weight (1.792) differ from the usual BT.601
// constants — confirm against the intended color standard before reuse.
#define RGB2Y(R, G, B) ( 16  + 0.183f * (R) + 0.614f * (G) + 0.062f * (B) )
#define RGB2U(R, G, B) ( 128 - 0.101f * (R) - 0.339f * (G) + 0.439f * (B) )
#define RGB2V(R, G, B) ( 128 + 0.439f * (R) - 0.399f * (G) - 0.040f * (B) )
#define YUV2R(Y, U, V) ( 1.164f *((Y) - 16) + 1.792f * ((V) - 128) )
#define YUV2G(Y, U, V) ( 1.164f *((Y) - 16) - 0.213f *((U) - 128) - 0.534f *((V) - 128) )
#define YUV2B(Y, U, V) ( 1.164f *((Y) - 16) + 2.114f *((U) - 128))
// Clamp x into [minValue, maxValue].
#define CLIPVALUE(x, minValue, maxValue) ((x) < (minValue) ? (minValue) : ((x) > (maxValue) ? (maxValue) : (x)))

// Convert packed RGB (3 bytes/pixel, row pitch rgbPitch) to planar YUV420 (I420).
// Launch: 1-D grid, one thread per (pitch-padded) luma position; threads whose
// (w, h) fall outside width x height exit early. Every thread writes one Y
// sample; only even-row/even-column threads write the (subsampled) U and V.
__global__ static void __RgbToYuv420p(const unsigned char* dpRgbData, size_t rgbPitch, unsigned char* dpYuv420pData, size_t yuv420Pitch, int width, int height)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Decompose the flat index using the PITCH (not width) so each row of
    // threads lines up with a padded row of the destination plane.
    int w = index % yuv420Pitch;
    int h = index / yuv420Pitch;
    if (w >= width || h >= height)
        return;
    // Plane base pointers: Y plane, then U (1/4 size), then V (1/4 size).
    unsigned char* dp_y_data = dpYuv420pData;
    unsigned char* dp_u_data = dp_y_data + height * yuv420Pitch;
    unsigned char* dp_v_data = dp_u_data + height * yuv420Pitch / 4;
    unsigned char r = dpRgbData[h * rgbPitch + w * 3 + 0];
    unsigned char g = dpRgbData[h * rgbPitch + w * 3 + 1];
    unsigned char b = dpRgbData[h * rgbPitch + w * 3 + 2];
    dp_y_data[h * yuv420Pitch + w] = (unsigned char)(CLIPVALUE(RGB2Y(r, g, b), 0, 255));
    // num: index of this pixel's 2x2 block in a width/2-wide chroma plane;
    // offset: extra padding bytes accumulated from the pitch of earlier rows.
    int num = h / 2 * width / 2 + w / 2;
    int offset = num / width * (yuv420Pitch - width);
    if (h % 2 == 0 && w % 2 == 0)
    {
        dp_u_data[num + offset] = (unsigned char)(CLIPVALUE(RGB2U(r, g, b), 0, 255));
        dp_v_data[num + offset] = (unsigned char)(CLIPVALUE(RGB2V(r, g, b), 0, 255));
    }
}

// Convert packed RGB to NV12 (planar Y followed by interleaved UV pairs).
// Same launch geometry as __RgbToYuv420p; chroma is written as U,V byte pairs
// by the even-row/even-column threads.
__global__ static void __RgbToNv12(const unsigned char* dpRgbData, size_t rgbPitch, unsigned char* dpNv12Data, size_t nv12Pitch, int width, int height)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int w = index % nv12Pitch;
    int h = index / nv12Pitch;
    if (w >= width || h >= height)
        return;
    // Y plane, then a single interleaved UV plane.
    unsigned char* dp_y_data = dpNv12Data;
    unsigned char* dp_u_data = dp_y_data + height * nv12Pitch;
    unsigned char r = dpRgbData[h * rgbPitch + w * 3 + 0];
    unsigned char g = dpRgbData[h * rgbPitch + w * 3 + 1];
    unsigned char b = dpRgbData[h * rgbPitch + w * 3 + 2];
    dp_y_data[h * nv12Pitch + w] = (unsigned char)CLIPVALUE(RGB2Y(r, g, b), 0, 255);
    // num: 2x2-block index; UV pairs are interleaved, hence the *2 addressing.
    int num = (h / 2 * width / 2 + w / 2);
    int offset = (num * 2 + 1) / width * (nv12Pitch - width);
    if (h % 2 == 0 && w % 2 == 0)
    {
        dp_u_data[num * 2 + 0 + offset] = (unsigned char)(CLIPVALUE(RGB2U(r, g, b), 0, 255));
        dp_u_data[num * 2 + 1 + offset] = (unsigned char)(CLIPVALUE(RGB2V(r, g, b), 0, 255));
    }
}

// Convert packed RGB to planar YUV422 (chroma subsampled horizontally only).
// Same launch geometry; every even-column thread writes U and V, so the chroma
// planes are each half the size of the Y plane.
__global__ static void __RgbToYuv422p(const unsigned char* dpRgbData, size_t rgbPitch, unsigned char* dpYuv422pData, size_t yuv422pPitch, int width, int height)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int w = index % yuv422pPitch;
    int h = index / yuv422pPitch;
    if (w >= width || h >= height)
        return;
    // Y plane, then U (1/2 size), then V (1/2 size).
    unsigned char* dp_y_data = dpYuv422pData;
    unsigned char* dp_u_data = dp_y_data + height * yuv422pPitch;
    unsigned char* dp_v_data = dp_u_data + height / 2 * yuv422pPitch;
    unsigned char r = dpRgbData[h * rgbPitch + w * 3 + 0];
    unsigned char g = dpRgbData[h * rgbPitch + w * 3 + 1];
    unsigned char b = dpRgbData[h * rgbPitch + w * 3 + 2];
    dp_y_data[h * yuv422pPitch + w] = (unsigned char)CLIPVALUE(RGB2Y(r, g, b), 0, 255);
    // num: index of this pixel's 1x2 chroma cell; offset compensates for pitch.
    int num = h * width / 2 + w / 2;
    int offset = num / width * (yuv422pPitch - width);
    if (w % 2 == 0)
    {
        dp_u_data[num + offset] = (unsigned char)(CLIPVALUE(RGB2U(r, g, b), 0, 255));
        dp_v_data[num + offset] = (unsigned char)(CLIPVALUE(RGB2V(r, g, b), 0, 255));
    }
}
11,774
#include <cmath>
#include <unistd.h>
#include <iostream>
#include <cuda_runtime_api.h>

#define N 257

using namespace std;

// Assumes that the block and grid sizes divide the width of the array.
// Doesn't do bounds checking — potential trouble! Kept as a teaching example.
// Use with: dim3 block(32, 32); dim3 grid(8, 8);
__global__ void matcpyBad(const float *in, float *out)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int width = blockDim.x * gridDim.x;
    out[x + y * width] = in[x + y * width];
}

// As matcpyBad, but each thread copies 4 rows via a strided loop (still no
// bounds checking). Use with: dim3 block(32, 32); dim3 grid(8, 2);
__global__ void matcpyBadWithLoop(const float *in, float *out)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int width = blockDim.x * gridDim.x;
    for (int j = 0; j < 4 * blockDim.y * gridDim.y; j += blockDim.y * gridDim.y)
    {
        int idx = (y + j) * width + x;
        out[idx] = in[idx];
    }
}

// Bounds-checked copy: works with any block/grid shape covering >= size threads.
__global__ void matcpy(const float *in, int size, float *out)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int idx = x + y * blockDim.x * gridDim.x;
    if (idx < size)
    {
        out[idx] = in[idx];
    }
}

// Bounds-checked copy where each thread handles 4 row-strided elements, so the
// grid only needs to cover a quarter of the rows.
__global__ void matcpyWithLoop(const float *in, int size, float *out)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int width = blockDim.x * gridDim.x;
    for (int j = 0; j < 4 * blockDim.y * gridDim.y; j += blockDim.y * gridDim.y)
    {
        int idx = (y + j) * width + x;
        if (idx < size)
        {
            out[idx] = in[idx];
        }
    }
}

int main()
{
    size_t size = N * N * sizeof(float);
    dim3 block(32, 32);
    // Grid covers N columns and N/4 rows (matcpyWithLoop copies 4 rows/thread).
    dim3 grid((N - 1) / block.x + 1, (N - 1) / (4 * block.y) + 1);

    // NOTE(review): ~2 x 258 KB on the stack; fine for N=257, but consider heap
    // allocation before increasing N.
    float in[N * N];
    float out[N * N] = {0.0f};
    for (int i = 0; i < N * N; i++)
    {
        in[i] = i;
    }

    float *dIn;
    float *dOut;
    cudaMalloc((void **)&dIn, size);
    cudaMalloc((void **)&dOut, size);
    cudaMemcpy(dIn, in, size, cudaMemcpyHostToDevice);
    matcpyWithLoop<<<grid, block>>>(dIn, N * N, dOut);
    // Blocking copy: also synchronizes with the kernel before reading out[].
    cudaMemcpy(out, dOut, size, cudaMemcpyDeviceToHost);

    bool areEqual = true;
    for (int i = 0; i < N * N; i++)
    {
        areEqual &= abs(in[i] - out[i]) < 0.00001;
    }
    if (areEqual)
    {
        cout << "copy successful" << endl;
    }
    else
    {
        cout << "copy failed" << endl;
    }

    // FIX: device buffers were never released.
    cudaFree(dIn);
    cudaFree(dOut);
    // usleep(1000000);
    return 0;
}
11,775
#include <iostream>
#include <cmath>

// CUDA kernel to add the elements of two arrays on the GPU: y[i] += x[i]
// for all i in [0, n), using a grid-stride loop so any launch config works.
__global__ void add(int n, float* x, float* y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x; // global index of this thread
    // FIX: the original comment called this the "number of threads in block";
    // it is the total number of threads in the whole grid (the loop stride).
    int stride = gridDim.x * blockDim.x;
    for (int i = index; i < n; i += stride)
    {
        y[i] += x[i];
    }
}

int main(void)
{
    int N = 1 << 20; // 1M elements
    float *x, *y;

    // Allocate unified memory — accessible from both CPU and GPU.
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));
    for (int i = 0; i < N; i++)
    {
        y[i] = 1.f;
        x[i] = 2.f;
    }

    // Launch the add() kernel: <<<n, m>>> — n thread blocks of m threads each.
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize; // ceil-div so all N are covered
    // FIX: pass blockSize rather than repeating the literal 256, so the grid
    // computation and the launch can never drift apart.
    add<<<numBlocks, blockSize>>>(N, x, y);

    // Make the CPU wait until the kernel is done before reading y.
    cudaDeviceSynchronize();

    // Every element should be 1 + 2 = 3.
    float maxError = 0.f;
    for (int i = 0; i < N; i++)
        maxError = std::fmax(maxError, std::fabs(y[i] - 3.f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free unified memory.
    cudaFree(x);
    cudaFree(y);

    return 0;
}
11,776
#include "includes.h" __global__ void KerComputeSpsTau(unsigned n,unsigned pini,float smag,float blin ,const float4 *velrhop,const float2 *gradvelff,float2 *tauff) { unsigned p=blockIdx.x*blockDim.x + threadIdx.x; if(p<n){ const unsigned p1=p+pini; float2 rr=gradvelff[p1*3]; const float grad_xx=rr.x,grad_xy=rr.y; rr=gradvelff[p1*3+1]; const float grad_xz=rr.x,grad_yy=rr.y; rr=gradvelff[p1*3+2]; const float grad_yz=rr.x,grad_zz=rr.y; const float pow1=grad_xx*grad_xx + grad_yy*grad_yy + grad_zz*grad_zz; const float prr= grad_xy*grad_xy + grad_xz*grad_xz + grad_yz*grad_yz + pow1+pow1; const float visc_sps=smag*sqrt(prr); const float div_u=grad_xx+grad_yy+grad_zz; const float sps_k=(2.0f/3.0f)*visc_sps*div_u; const float sps_blin=blin*prr; const float sumsps=-(sps_k+sps_blin); const float twovisc_sps=(visc_sps+visc_sps); float one_rho2=1.0f/velrhop[p1].w; //-Computes new values of tau[]. const float tau_xx=one_rho2*(twovisc_sps*grad_xx +sumsps); const float tau_xy=one_rho2*(visc_sps *grad_xy); tauff[p1*3]=make_float2(tau_xx,tau_xy); const float tau_xz=one_rho2*(visc_sps *grad_xz); const float tau_yy=one_rho2*(twovisc_sps*grad_yy +sumsps); tauff[p1*3+1]=make_float2(tau_xz,tau_yy); const float tau_yz=one_rho2*(visc_sps *grad_yz); const float tau_zz=one_rho2*(twovisc_sps*grad_zz +sumsps); tauff[p1*3+2]=make_float2(tau_yz,tau_zz); } }
11,777
// Matrix multiplication code: C = A * B. Greatly simplified (CPU-only path;
// the GPU path was already commented out).
#include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>
#include <chrono>

#define BLOCK_SIZE 32
#ifndef VECTOR_SIZE
#define VECTOR_SIZE 128
#endif

// Sequential CPU matrix multiply: P += M * N for square size x size
// row-major matrices.  P must be zero-initialized by the caller (the
// function accumulates).  The loops run i-k-j, which is numerically
// identical to i-j-k (k still ascends for every P[i][j]) but walks N and P
// row-wise for far better cache behaviour than the original i-j-k order.
void MatrixMulCpuSeq(float* M, float* N, float* P, int size) {
    for (int i = 0; i < size; i++) {
        for (int k = 0; k < size; k++) {
            const float m = M[i*size+k];   // loop-invariant for the j loop
            for (int j = 0; j < size; j++) {
                P[i*size+j] += m * N[k*size+j];
            }
        }
    }
}

int main(int argc, char **argv) {
    const int size = VECTOR_SIZE;
    float *h_A = (float *)malloc(sizeof(float) * size * size);
    float *h_B = (float *)malloc(sizeof(float) * size * size);
    float *h_C = (float *)malloc(sizeof(float) * size * size);
    for (int i = 0; i < size * size; ++i) {
        h_A[i] = 1.0f;
        h_B[i] = 1.0f;
        // BUGFIX: h_C comes from malloc and MatrixMulCpuSeq accumulates
        // with +=, so it must start at zero; previously it held garbage.
        h_C[i] = 0.0f;
    }

    // SEQUENTIAL path, timed with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);

    MatrixMulCpuSeq(h_A, h_B, h_C, size);

    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    float msecPerMatrixMul = msecTotal;
    printf("Time= %f\n", msecPerMatrixMul);

    // test relative error by the formula
    // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
    // With all-ones inputs every element of C should equal `size`.
    double eps = 1.e-6 ; // machine zero
    for (int i = 0; i < (int)(size * size); i++) {
        double abs_err = fabs(h_C[i] - (size * 1.0f));
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err/abs_val/size ;
        if (rel_err > eps)
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], size*1.0f, eps);
    }

    // Clean up events and host buffers.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_A);
    free(h_B);
    free(h_C);
    return(0);
}
11,778
//#define REARRANGED_DOMAIN

// For each of the N items, replaces its three edge values with the pairwise
// means of its three vertex values (edge e gets the average of the two
// vertices it is not opposite to).  One thread per item; threads beyond N
// exit immediately.  Layout depends on REARRANGED_DOMAIN.
__global__ void _interpolate_from_vertices_to_edges(
        int N,
        double* vertex_values,
        double* edge_values)
{
    const int item = threadIdx.x + threadIdx.y * blockDim.x +
                     (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x * blockDim.y;
    if (item >= N) return;

#ifndef REARRANGED_DOMAIN
    // Contiguous layout: item `item` owns slots [3*item, 3*item + 2].
    const int base = item * 3;
    const double v0 = vertex_values[base + 0];
    const double v1 = vertex_values[base + 1];
    const double v2 = vertex_values[base + 2];
    edge_values[base + 0] = 0.5 * (v1 + v2);
    edge_values[base + 1] = 0.5 * (v0 + v2);
    edge_values[base + 2] = 0.5 * (v0 + v1);
#else
    // Rearranged layout: component c of item `item` lives at item + c*N.
    const double v0 = vertex_values[item];
    const double v1 = vertex_values[item + N];
    const double v2 = vertex_values[item + 2 * N];
    edge_values[item]         = 0.5 * (v1 + v2);
    edge_values[item + N]     = 0.5 * (v0 + v2);
    edge_values[item + 2 * N] = 0.5 * (v0 + v1);
#endif
}
11,779
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>

// Element-wise addition: result = A + B for dim x dim row-major matrices.
// Launch with a 2D grid/block covering dim x dim threads.
__global__ void dev_matrix_add(int dim, float * A, float * B, float * result)
{
    int row = (blockIdx.y * blockDim.y) + threadIdx.y;
    int col = (blockIdx.x * blockDim.x) + threadIdx.x;
    if(row < dim && col < dim)
        result[row * dim + col] = A[row * dim + col] + B[row * dim + col];
}

// Element-wise subtraction: result = A - B.
__global__ void dev_matrix_sub(int dim, float * A, float * B, float * result)
{
    int row = (blockIdx.y * blockDim.y) + threadIdx.y;
    int col = (blockIdx.x * blockDim.x) + threadIdx.x;
    if(row < dim && col < dim)
        result[row * dim + col] = A[row * dim + col] - B[row * dim + col];
}

// Matrix product.
// NOTE(review): the inner loop reads B[col * dim + i], i.e. row `col` of B;
// for a row-major B this computes A * B^T, not A * B.  The CPU matrix_mult
// below matches, and main()'s constant matrices make the two results
// indistinguishable -- confirm whether B is meant to arrive pre-transposed.
__global__ void dev_matrix_mult(int dim, float * A, float * B, float * result)
{
    int row = (blockIdx.y * blockDim.y) + threadIdx.y;
    int col = (blockIdx.x * blockDim.x) + threadIdx.x;
    if(row >= dim || col >= dim)
        return;
    float sum = 0.0f;
    for (int i = 0; i < dim; i++)
    {
        sum += A[row * dim + i] * B[col * dim + i];
    }
    result[row * dim + col] = sum;
}

// Fills a dim x dim matrix with `val`.
__global__ void dev_initialize(int dim, float * tar, float val)
{
    int row = (blockIdx.y * blockDim.y) + threadIdx.y;
    int col = (blockIdx.x * blockDim.x) + threadIdx.x;
    if(row >= dim || col >= dim)
        return;
    tar[row * dim + col] = val;
}

// CPU reference: element-wise addition.
void matrix_add(int dim, float * A, float * B, float * result)
{
    for(int row = 0; row < dim; row++)
    {
        for(int col = 0; col < dim; col++)
        {
            result[row * dim + col] = A[row * dim + col] + B[row * dim + col];
        }
    }
}

// CPU reference: element-wise subtraction.
void matrix_sub(int dim, float * A, float * B, float * result)
{
    for(int row = 0; row < dim; row++)
    {
        for(int col = 0; col < dim; col++)
        {
            result[row * dim + col] = A[row * dim + col] - B[row * dim + col];
        }
    }
}

// CPU reference product; shares the B[col * dim + i] indexing with
// dev_matrix_mult -- see the note there.
void matrix_mult(int dim, float * A, float * B, float * result)
{
    for(int row = 0; row < dim; row++)
    {
        for(int col = 0; col < dim; col++)
        {
            float sum = 0.0f;
            for (int i = 0; i < dim; i++)
            {
                sum += A[row * dim + i] * B[col * dim + i];
            }
            result[row * dim + col] = sum;
        }
    }
}

// Demo driver: multiplies a 4x4 matrix of 1s by a 4x4 matrix of 2s on the
// GPU with 2x2 thread tiles, then prints operands and result.
// NOTE(review): `N` is a BYTE count despite the name; host and device
// allocations are never freed.
int main(int argc, char** argv)
{
    float * A, *B, * result, * dev_A, * dev_B, * dev_result;
    int M(4);                        // matrix edge length
    int tileWidth = 2;               // threads per block edge
    int N = M * M * sizeof(float);   // bytes per matrix
    A = (float*) malloc( N);
    B = (float*) malloc(N);
    result = (float*) malloc(N);
    cudaMalloc((void**) & dev_A, N);
    cudaMalloc((void**) & dev_B, N);
    cudaMalloc((void**) & dev_result,N);
    //initialize
    for(int i = 0;i<M*M;i++)
    {
        A[i] = 1.0f;
        B[i] = 2.0f;
    }
    cudaMemcpy(dev_A,A,N,cudaMemcpyHostToDevice);
    cudaMemcpy(dev_B,B,N,cudaMemcpyHostToDevice);
    // Ceil-divide the matrix into tileWidth x tileWidth blocks.
    dim3 gridDim((int)ceil((float)M/(float)tileWidth),(int)ceil((float)M/(float)tileWidth));
    dim3 blockDim(tileWidth,tileWidth);
    //dev_initialize<<<gridDim,blockDim>>>(M,dev_A,1.0f);
    //dev_initialize<<<gridDim,blockDim>>>(M,dev_B,1.0f);
    dev_matrix_mult<<<gridDim,blockDim>>>(M,dev_A,dev_B,dev_result);
    // Blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(result, dev_result,N,cudaMemcpyDeviceToHost);
    //display matrix
    std::cout<<"matrix A:"<<std::endl;
    for(int i = 0;i<M*M;i++)
    {
        std::cout<<A[i]<<" ";
    }
    std::cout<<std::endl<<"matrix B:"<<std::endl;
    for(int i = 0;i<M*M;i++)
    {
        std::cout<<B[i]<<" ";
    }
    std::cout<<std::endl<<"result:"<<std::endl;
    for(int i = 0;i<M*M;i++)
    {
        std::cout<<result[i]<<" ";
    }
    std::cin.get();   // keep the console window open
    return 0;
}
11,780
/* Includes, system */
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define N 1024
#define BLOCK_SIZE 512

/* DEVICE CODE */

/*
 * Per-block shared-memory tree reduction.  Launch with BLOCK_SIZE threads
 * per block; block b writes its partial sum into intprodParcial[b].
 * NOTE(review): the dot-product line is still disabled (debug leftover?),
 * so the kernel currently reduces d1 alone; re-enable the commented line
 * for a true inner product.
 */
__global__ void IntProd(int* intprodParcial,int* d1,int* d2){
    __shared__ double accumResult[BLOCK_SIZE];
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int iAccum = threadIdx.x;
    int vec = blockIdx.x;
    // accumResult[iAccum] = d1[pos] * d2[pos];
    accumResult[iAccum] = d1[pos];
    /* BUGFIX: the `iAccum < stride` guard was missing, so every thread read
       accumResult[stride + iAccum] -- indexing up to 3*BLOCK_SIZE/2 - 1,
       well past the end of the shared array, corrupting the sum. */
    for(int stride = BLOCK_SIZE / 2; stride > 0; stride >>= 1){
        __syncthreads();
        if (iAccum < stride)
            accumResult[iAccum] += accumResult[stride + iAccum];
    }
    if(threadIdx.x == 0) {
        intprodParcial[vec] = accumResult[0]; /* implicit double -> int */
    }
}

/* Element-wise addition for a fully general 3D grid / 3D block launch. */
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
    int idBloque = blockIdx.y * gridDim.x + blockIdx.x;
    int idThread = idBloque * blockDim.z * blockDim.y * blockDim.x
                 + threadIdx.z * blockDim.y * blockDim.x
                 + threadIdx.y * blockDim.x + threadIdx.x;
    sum[idThread] = d1[idThread] + d2[idThread];
}

/* HOST CODE */
int main(int argc, char** argv)
{
    int DeviceCount = 0,i;
    int *h_d1,*h_d2,*h_sum;
    int *d_d1,*d_d2,*d_sum;

    h_d1 = (int*)malloc(N * sizeof(h_d1[0]));
    h_d2 = (int*)malloc(N * sizeof(h_d2[0]));
    h_sum = (int*)malloc(N * sizeof(h_sum[0]));
    for (i=0;i<N;i++){h_d1[i]=i;h_d2[i]=i;h_sum[i]=0;}

    /* Initialize CUDA (driver API probe kept from the original code). */
    if (cuInit(0) != 0){
        printf("ERROR de inicializacion\n");
        exit(0);
    }
    cuDeviceGetCount(&DeviceCount);
    if (DeviceCount == 0){
        printf("ERROR ningun dispositivo soporta CUDA\n");
        exit(0);
    }

    /* BUGFIX: the allocations/memsets used N*sizeof(d_d1) -- the size of a
       POINTER (8 bytes), not of an element -- silently over-allocating 2x.
       Now sized by the element type. */
    cudaMalloc((void**)&d_d1,N*sizeof(d_d1[0]));cudaMemset(d_d1,0,N*sizeof(d_d1[0]));
    cudaMalloc((void**)&d_d2,N*sizeof(d_d2[0]));cudaMemset(d_d2,0,N*sizeof(d_d2[0]));
    cudaMalloc((void**)&d_sum,N*sizeof(d_sum[0]));cudaMemset(d_sum,0,N*sizeof(d_sum[0]));

    cudaMemcpy(d_d1,h_d1,N*sizeof(h_d1[0]),cudaMemcpyHostToDevice);
    cudaMemcpy(d_d2,h_d2,N*sizeof(h_d2[0]),cudaMemcpyHostToDevice);

    /* Two blocks of BLOCK_SIZE threads cover all N = 1024 elements. */
    IntProd<<<2,BLOCK_SIZE>>>(d_sum,d_d1,d_d2);

    cudaMemcpy(h_sum,d_sum,N*sizeof(h_sum[0]),cudaMemcpyDeviceToHost);
    /* Combine the two per-block partial sums on the host. */
    printf("Resultado: %d \n",h_sum[0]+h_sum[1]);

    cudaFree(d_d1);cudaFree(d_d2);cudaFree(d_sum);
    free(h_d1);free(h_d2);free(h_sum);
    return 0;
}
11,781
/*
 Single Author info:
 prajago4 Priyadarshini Rajagopal
 Group info:
 1. avelayu Ashitha Velayudhan
 2. prajago4 Priyadarshini Rajagopal
 3. smnatara Sekharan Muthusamy Natarajan
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>

#define __DEBUG
#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)

/* Advances *t by dt; defined elsewhere.  The caller's loop breaks when this
   returns 0 -- presumably once end_time is reached (confirm in tpdt's TU). */
extern int tpdt(double *t, double dt, double end_time);

/**************************************
 * void __cudaSafeCall(cudaError err, const char *file, const int line)
 * void __cudaCheckError(const char *file, const int line)
 *
 * These routines were taken from the GPU Computing SDK
 * (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
 **************************************/
/* Aborts with a diagnostic if a CUDA API call returned an error. */
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do
    {
        if ( cudaSuccess != err )
        {
            fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }
    } while ( 0 );
#pragma warning( pop )
#endif  // __DEBUG
    return;
}

/* Aborts with a diagnostic if a prior kernel launch left a sticky error. */
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do
    {
        cudaError_t err = cudaGetLastError();
        if ( cudaSuccess != err )
        {
            fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }
        // More careful checking. However, this will affect performance.
        // Comment if not needed.
        /*err = cudaThreadSynchronize();
        if( cudaSuccess != err )
        {
            fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }*/
    } while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
    return;
}

/*
 * One time step of the lake wave equation on an n x n grid, where
 * n = gridDim.x * blockDim.x and there is one thread per cell (2D grid of
 * 2D blocks).  un/uc/uo are the next/current/previous surface heights;
 * pebbles holds the impact source term.  Cells on the outer boundary are
 * clamped to 0; interior cells apply a 9-point stencil (4 axis neighbours
 * plus 0.25-weighted diagonals).  The literal 0.1 stands in for VSQR in
 * the reference formula kept in the comment below.
 */
__global__ void evolve_kernal(double *un, double *uc, double *uo, double *pebbles, double h, double dt,double t)
{
    int threads_per_block = blockDim.x * blockDim.y;
    /* Flattened row-major cell index: idx = x + y * n. */
    int idx = threadIdx.x + blockIdx.x*blockDim.x
            + threadIdx.y*blockDim.x*gridDim.x
            + blockIdx.y*(threads_per_block)*gridDim.x;
    int n = gridDim.x * blockDim.x ;
    /* Exponentially decaying pebble forcing.
       NOTE(review): computed in float although everything else is double --
       confirm the precision loss is acceptable. */
    float f = -expf(-1.0 * t) * pebbles[idx];
    /* x == 0 iff blockIdx.x == 0 && threadIdx.x == 0 (and similarly for the
       other three edges), so this detects exactly the outer boundary. */
    if( (blockIdx.x == 0 && threadIdx.x == 0) ||
        (blockIdx.x == gridDim.x - 1 && threadIdx.x == blockDim.x - 1) ||
        (blockIdx.y == 0 && threadIdx.y == 0) ||
        (blockIdx.y == gridDim.y - 1 && threadIdx.y == blockDim.y - 1))
    {
        un[idx] = 0.;
    }
    else
    {
        /* Reference formula:
        un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *(( WEST + EAST + NORTH + SOUTH
                  + 0.25*(NORTHWEST + NORTHEAST + SOUTHWEST + SOUTHEAST)
                  - 5 * uc[idx])/(h * h) + f(pebbles[idx],t)); */
        un[idx] = 2*uc[idx] - uo[idx] + 0.1 *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + uc[idx - n] + 0.25 * (uc[idx+n-1] + uc[idx+n+1] + uc[idx-n-1] + uc[idx-n+1])- 5 * uc[idx])/(h * h) + f);
    }
}

/*
 * Runs the GPU lake simulation from t=0 to end_time with step dt = h/2 and
 * copies the final surface into u.  u0/u1 are the two initial time levels;
 * the grid is n x n with nthreads x nthreads threads per block (n must be
 * divisible by nthreads).
 * NOTE(review): the two device-to-device copies per step could be replaced
 * by pointer rotation; the reported "GPU computation" time also includes
 * the host-to-device transfers because kstart is recorded before them.
 */
void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
    cudaEvent_t kstart, kstop;
    float ktime;

    /* HW2: Define your local variables here */
    double t, dt;
    t = 0.;
    dt = h / 2;

    /* Set up device timers */
    CUDA_CALL(cudaSetDevice(0));
    CUDA_CALL(cudaEventCreate(&kstart));
    CUDA_CALL(cudaEventCreate(&kstop));

    /* HW2: Add CUDA kernel call preperation code here */
    /* Variables on the device: */
    double *u_dev,*u0_dev,*u1_dev,*pebbles_dev;
    /* Allocate memory for device variables */
    CUDA_CALL(cudaMalloc((void **)&u_dev,sizeof(double)*n*n));
    CUDA_CALL(cudaMalloc((void **)&u0_dev,sizeof(double)*n*n));
    CUDA_CALL(cudaMalloc((void **)&u1_dev,sizeof(double)*n*n));
    CUDA_CALL(cudaMalloc((void **)&pebbles_dev,sizeof(double)*n*n));

    // Setting grid and block dimensions (one thread per cell).
    dim3 grid(n/nthreads,n/nthreads);
    dim3 block(nthreads,nthreads);

    /* Start GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstart, 0));

    // Transferring data from host to device memory
    CUDA_CALL(cudaMemcpy(u0_dev,u0,sizeof(double)*n*n,cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(u1_dev,u1,sizeof(double)*n*n,cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(pebbles_dev,pebbles,sizeof(double)*n*n,cudaMemcpyHostToDevice));

    /* HW2: Add main lake simulation loop here.
       Each iteration computes u_dev from (u1_dev, u0_dev), then shifts the
       time levels: u0 <- u1, u1 <- u(new). */
    while(1)
    {
        evolve_kernal<<<grid,block>>>(u_dev, u1_dev, u0_dev, pebbles_dev, h, dt, t);
        CUDA_CALL(cudaMemcpy(u0_dev,u1_dev,sizeof(double)*n*n,cudaMemcpyDeviceToDevice));
        CUDA_CALL(cudaMemcpy(u1_dev,u_dev,sizeof(double)*n*n,cudaMemcpyDeviceToDevice));
        if(!tpdt(&t, dt, end_time))
            break;
    }
    CUDA_CALL(cudaMemcpy(u,u_dev,sizeof(double)*n*n,cudaMemcpyDeviceToHost));

    /* Stop GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstop, 0));
    CUDA_CALL(cudaEventSynchronize(kstop));
    CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
    printf("GPU computation: %f msec\n", ktime);

    /* HW2: Add post CUDA kernel call processing and cleanup here */
    /* timer cleanup */
    CUDA_CALL(cudaEventDestroy(kstart));
    CUDA_CALL(cudaEventDestroy(kstop));
}
11,782
#include "includes.h" __global__ void var(int *a,int *b,int n,float mean) { int block=256*blockIdx.x; float sum=0; for(int i=block;i<min(block+256,n);i++) { sum=sum+(a[i]-mean)*(a[i]-mean); } b[blockIdx.x]=sum; }
11,783
/*
 * Sergio Isaac Mercado Silvano
 * A01020382
 * CUDA - Lab 1
 * 12/11/2019
 */
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"

#define MIN(a,b)(a<b?a:b)
#define NUM_RECTS 1e9
#define THREADS 256
#define BLOCKS MIN(32,(NUM_RECTS + THREADS - 1)/THREADS)

// Midpoint-rectangle approximation of pi = integral over [0,1] of
// 4/(1+x^2).  Each thread accumulates a grid-strided subset of rectangle
// heights, the block reduces its threads' partials in shared memory, and
// results[blockIdx.x] receives one partial sum per block.
__global__ void kernel(float width, float *results){
    // BUGFIX: this buffer was declared `__shared__ long`, silently
    // truncating every float partial sum to an integer before reduction.
    __shared__ float cache[THREADS];
    int i, cacheIndex;
    float acum, mid, height;

    i = threadIdx.x + (blockIdx.x * blockDim.x); // id of current thread
    cacheIndex = threadIdx.x;
    acum = 0; // accumulated heights for this thread

    // Grid-stride accumulation of rectangle heights.
    while(i < NUM_RECTS){
        mid = (i + 0.5) * width;
        height = 4.0/(1.0 + mid * mid);
        acum += height;
        i+= blockDim.x * gridDim.x;
    }
    cache[cacheIndex] = acum;
    __syncthreads(); // all partials written before the reduction starts

    // Standard power-of-two tree reduction within the block.
    i = blockDim.x/2;
    while(i > 0){
        if(cacheIndex < i){
            cache[cacheIndex] += cache[cacheIndex + i];
        }
        __syncthreads();
        i /= 2;
    }
    // End reduction

    if(cacheIndex == 0){
        results[blockIdx.x] = cache[cacheIndex]; // one partial sum per block
    }
}

int main(void){
    float acum, width, area;
    float *results, *dr;
    int i;

    width = 1.0/ (double) NUM_RECTS; // rectangle width

    // Host buffer for the per-block partial sums.
    results = (float*) malloc(BLOCKS * sizeof(float));
    // Device buffer the kernel writes into.
    cudaMalloc((void**) &dr, BLOCKS * sizeof(float));

    kernel<<<BLOCKS,THREADS>>> (width, dr); // GPU calculation

    // Blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(results, dr, BLOCKS * sizeof(float), cudaMemcpyDeviceToHost);

    // Final CPU reduction over the per-block partial sums.
    acum = 0;
    for(i = 0; i < BLOCKS; i++){
        acum += results[i];
    }
    area = width * acum; // final area (pi)
    printf("PI = %.15lf\n", area);

    // IMPORTANT: free up device and host memory.
    cudaFree(dr);
    free(results);
    return 0;
}
11,784
#include "includes.h" #define UPPERTHRESHOLD 90 #define LOWERTHRESHOLD 30 const float G_x[3 * 3] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 }; const float G_y[3 * 3] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 }; const float gaussian[5 * 5] = { 2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159, 4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159, 5.f/159, 12.f/159, 15.f/159, 12.f/159, 2.f/159, 4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159, 2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159 }; __global__ void kernSmooth(int N, int width, int height, unsigned char * in, unsigned char * out, const float * kernel, int kernSize) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x >= width || y >= height) { return; } float c = 0.0f; for (int i = 0; i < kernSize; i++) { int tx = x + i - kernSize/2; for (int j = 0; j < kernSize; j++) { int ty = y + j - kernSize/2; if (tx >= 0 && ty >= 0 && tx < width && ty < height) { c += in[ty * width + tx] * kernel[j * kernSize + i]; } } } out[y * width + x] = fabs(c); }
11,785
#include "includes.h" __device__ inline unsigned int RM_Index(unsigned int row, unsigned int col, unsigned int width) { return (row * width + col); } __global__ void ComplementNBCalcKernel(const float *d_data, const int *d_labels, float *per_class_feature_sum_, float *per_feature_sum_, unsigned int n_samples_, unsigned int n_features_) { // Each thread will take care of one term for all docs unsigned int tidx = threadIdx.x; unsigned int feat_col = tidx + (blockIdx.x * blockDim.x); unsigned int i = 0, row = 0; if (feat_col < n_features_) { // End condition check // For each document / sample for (i = 0; i < n_samples_; ++i) { row = d_labels[i]; // No race condition since each thread deals with one feature only // This is embarrasingly parallel per_class_feature_sum_[RM_Index(row, feat_col, n_features_)] += d_data[RM_Index(i, feat_col, n_features_)]; per_feature_sum_[feat_col] += d_data[RM_Index(i, feat_col, n_features_)]; } } return; }
11,786
#include <stdio.h>
#include <stdlib.h>

// Sobel edge detector over the interior of a rows x column_size grayscale
// image.  One thread per source pixel (launched as `rows` blocks of
// `column_size` threads).  Border pixels produce no output; results go to
// a (rows-2) x (column_size-2) array, thresholded to 0 or 255.
__global__ void cu_sobel(int *l_source_array_d, int *l_result_array_d, int rows, int column_size)
{
    int x_0, x_1, x_2, x_3, x_5, x_6, x_7, x_8, sum_0, sum_1, sum;
    int pos = blockIdx.x * column_size + threadIdx.x;
    int row = pos / column_size;
    int col = pos % column_size;
    // Edge predicates: pixels without a full 3x3 neighbourhood are skipped.
    bool top = (row == 0);
    bool bottom = (row == (rows - 1));
    bool left_edge = (col == 0);
    bool right_edge = (col == (column_size - 1));
    if (top == false && bottom == false && left_edge == false && right_edge == false)
    {
        // 3x3 neighbourhood (the centre, x_4, is unused by Sobel).
        x_0 = l_source_array_d[(row - 1) * column_size + (col - 1)];
        x_1 = l_source_array_d[(row - 1) * column_size + (col)];
        x_2 = l_source_array_d[(row - 1) * column_size + (col + 1)];
        x_3 = l_source_array_d[(row)*column_size + (col - 1)];
        x_5 = l_source_array_d[(row)*column_size + (col + 1)];
        x_6 = l_source_array_d[(row + 1) * column_size + (col - 1)];
        x_7 = l_source_array_d[(row + 1) * column_size + (col)];
        x_8 = l_source_array_d[(row + 1) * column_size + (col + 1)];
        sum_0 = (x_0 + (2 * x_1) + x_2) - (x_6 + (2 * x_7) + x_8); // y-direction gradient
        sum_1 = (x_2 + (2 * x_5) + x_8) - (x_0 + (2 * x_3) + x_6); // x-direction gradient
        // NOTE(review): signed gradients are added directly rather than
        // |gx|+|gy| or sqrt(gx^2+gy^2) -- opposite-sign gradients cancel;
        // confirm intent.  Threshold 20 is a magic constant.
        sum = sum_0 + sum_1;
        if (sum > 20)
        {
            sum = 255;
        }
        else
        {
            sum = 0;
        }
        // Output array is 2 smaller in each dimension, hence the shifted index.
        l_result_array_d[((row - 1) * (column_size - 2)) + (col - 1)] = sum;
    }
}

// Called from driver program. Handles running GPU calculation.
// Copies the source image to the device, runs cu_sobel, and copies the
// (rows-2) x (column_size-2) result back.
// NOTE(review): block_size equals the image width, so images wider than the
// device's max threads per block (typically 1024) fail to launch -- and no
// CUDA error checking would report it.
extern "C" void gpu_sobel(int *l_source_array, int *l_result_array, int src_rows, int src_column_size)
{
    int num_bytes_source = src_column_size * src_rows * sizeof(int);
    int *l_source_array_d;
    int *l_result_array_d;
    cudaMalloc((void **)&l_source_array_d, num_bytes_source);
    cudaMemcpy(l_source_array_d, l_source_array, num_bytes_source, cudaMemcpyHostToDevice);
    int result_column_size = src_column_size - 2;
    int result_row_size = src_rows - 2;
    int num_bytes_result = result_column_size * result_row_size * sizeof(int);
    cudaMalloc((void **)&l_result_array_d, num_bytes_result);
    // block size should be adjusted to the problem size for performance
    dim3 block_size(src_column_size);
    // grid size should limit the amount of work to be completed
    dim3 grid_size(src_rows);
    cu_sobel<<<grid_size, block_size>>>(l_source_array_d, l_result_array_d, src_rows, src_column_size);
    // transfer results back to host (blocking copy also synchronizes)
    cudaMemcpy(l_result_array, l_result_array_d, num_bytes_result, cudaMemcpyDeviceToHost);
    // release the memory on the GPU
    cudaFree(l_source_array_d);
    cudaFree(l_result_array_d);
}
11,787
#include "includes.h" __global__ void BilinearAddSubImageKernel(float *input, float *opImage, float* subImageDefs, int inputWidth, int inputHeight, int opImageWidth, int opImageHeight) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; float subImgCX = subImageDefs[0]; // <-1, 1> float subImgCY = subImageDefs[1]; // <-1, 1> float subImgDiameter = subImageDefs[2]; // <0,1> int maxDiameter = min(inputWidth, inputHeight); int diameterPix = (int)(subImgDiameter * maxDiameter); diameterPix = max(1, diameterPix); int subImgX = (int)(inputWidth * (subImgCX + 1) * 0.5f) - diameterPix / 2; int subImgY = (int)(inputHeight * (subImgCY + 1) * 0.5f) - diameterPix / 2; int px = id % diameterPix; int py = id / diameterPix; if (px + subImgX >= 0 && py + subImgY >= 0 && px + subImgX < inputWidth && py + subImgY < inputHeight && py < diameterPix ) { float xRatio = (float)(opImageWidth - 1) / (diameterPix); float yRatio = (float)(opImageHeight - 1) / (diameterPix); int x = (int) (xRatio * px); int y = (int) (yRatio * py); // X and Y distance difference float xDist = (xRatio * px) - x; float yDist = (yRatio * py) - y; // Points float topLeft= opImage[y * opImageWidth + x]; float topRight = opImage[y * opImageWidth + x + 1]; float bottomLeft = opImage[(y + 1) * opImageWidth + x]; float bottomRight = opImage[(y + 1) * opImageWidth + x + 1]; float result = topLeft * (1 - xDist) * (1 - yDist) + topRight * xDist * (1 - yDist) + bottomLeft * yDist * (1 - xDist) + bottomRight * xDist * yDist; input[(py + subImgY) * inputWidth + px + subImgX] += result; } }
11,788
#include <stdio.h>

// Dot product of two 4-element vectors.  Launch with ONE block of 4
// threads: each thread writes one pairwise product into shared memory,
// then a single thread sums the four products into *d_c.
__global__ void dot_product(int *d_a, int *d_b,int *d_c){
    int i = threadIdx.x;
    __shared__ int temp[4];
    temp[i] = d_a[i] * d_b[i];
    __syncthreads(); // all four products visible before the serial sum
    if(i==0){
        *d_c = temp[0]+temp[1]+temp[2]+temp[3];
    }
}

int main(void){
    printf("Hello, World - from CPU!\n");
    int a[4] = {22,13,16,5};
    int b[4] = {5,22,17,37};
    // BUGFIX: was `int c[0]` -- a zero-length array, so reading c[0] (and
    // copying into it) was undefined behavior.
    int c[1];
    int *d_a;
    int *d_b;
    int *d_c;
    cudaMalloc((void**)&d_a,sizeof(int)*4);
    cudaMalloc((void**)&d_b,sizeof(int)*4);
    cudaMalloc((void**)&d_c,sizeof(int));
    cudaMemcpy(d_a,a,sizeof(int)*4,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,sizeof(int)*4,cudaMemcpyHostToDevice);
    // BUGFIX: was <<<2,2>>>.  With blockDim 2, only temp[0..1] were ever
    // written, so the sum read two uninitialized shared slots -- and the
    // two blocks raced on *d_c.  One block of 4 threads covers each
    // element exactly once.
    dot_product<<<1,4>>>(d_a,d_b,d_c);
    cudaMemcpy(c,d_c,sizeof(int)*1,cudaMemcpyDeviceToHost);
    printf("%d",c[0]); // expected: 22*5 + 13*22 + 16*17 + 5*37 = 853
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
11,789
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>

// Stopwatch for timing GPU work, backed by a pair of CUDA events.
// start()/stop() pairs measure elapsed time in milliseconds; redundant
// starts and stops are ignored.
class cuStopwatch{
    public:
        cuStopwatch();
        ~cuStopwatch();
        void start();
        float stop();
    private:
        float elapsedTime;
        bool started;
        cudaEvent_t startTime;
        cudaEvent_t endTime;
};

// Creates the two timing events; reports any non-zero status code.
cuStopwatch::cuStopwatch() : elapsedTime(0), started(false){
    cudaError_t rc = cudaEventCreate(&startTime);
    if (rc != 0)
        printf("Return code when recording startTime : %d\n", rc);
    rc = cudaEventCreate(&endTime);
    if (rc != 0)
        printf("Return code when recording endTime : %d\n", rc);
}

// Releases both events.
cuStopwatch::~cuStopwatch(){
    cudaEventDestroy(startTime);
    cudaEventDestroy(endTime);
}

// Records the start event; a second start() before stop() is a no-op.
void cuStopwatch::start(){
    if (started)
        return;
    started = true;
    cudaError_t rc = cudaEventRecord(startTime);
    if (rc != 0)
        printf("Return code when recording startTime : %d\n", rc);
}

// Records the end event, waits for it, and returns the elapsed time in ms.
// Invalid stops (never started, or already stopped) return 0.
float cuStopwatch::stop(){
    if (!started)
        return 0;
    cudaError_t rc = cudaEventRecord(endTime);
    if (rc != 0)
        printf("Return code when recording endTime : %d\n", rc);
    cudaEventSynchronize(endTime);
    rc = cudaEventElapsedTime(&elapsedTime, startTime, endTime);
    if (rc != 0)
        printf("Return code when computing elapsed time : %d\n", rc);
    started = false;
    return elapsedTime;
}
11,790
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>

#define N 128
#define base 0
#define THREADS_PER_BLOCK 512

__global__ void count_characters(char *buffer, int *freq, long file_size, int total_threads);
void display_count(int *freq, int n);

/*
 * Main: reads a whole file into host memory, counts character frequencies
 * on the GPU, and reports transfer/kernel timings.
 */
int main(int argc, char *argv[]){
    int blocks;
    int num_threads;
    float total_time, comp_time;
    cudaEvent_t total_start, total_stop, comp_start, comp_stop;
    cudaEventCreate(&total_start);
    cudaEventCreate(&total_stop);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_stop);

    FILE *pFile;
    long file_size;
    char * buffer;
    char * filename;
    size_t result;
    int * freq;

    if (argc != 2) {
        printf ("Usage : %s <file_name>\n", argv[0]);
        return 1;
    }
    filename = argv[1];
    pFile = fopen ( filename , "rb" );
    if (pFile==NULL) {printf ("File error\n"); return 2;}

    /* obtain file size */
    fseek (pFile , 0 , SEEK_END);
    file_size = ftell (pFile);
    rewind (pFile);
    printf("file size is %ld\n", file_size);

    /* allocate memory to contain the file */
    buffer = (char*) malloc (sizeof(char)*file_size);
    if (buffer == NULL) {printf ("Memory error\n"); return 3;}

    /* copy the file into the buffer */
    result = fread (buffer,1,file_size,pFile);
    if (result != file_size) {printf ("Reading error\n"); return 4;}
    fclose (pFile);

    freq = (int*) malloc(sizeof(int)*N);
    if (freq == NULL) {printf ("Memory error\n"); return 5;}

    /*
     * Memory allocation on device
     */
    char *buff_dev;
    int *freq_dev;
    cudaMalloc((void **)&buff_dev, file_size*sizeof(char));
    cudaMalloc((void **)&freq_dev, N*sizeof(int));
    /* BUGFIX: previously cudaMemset(freq_dev, 0, N) zeroed only N BYTES,
       i.e. the first N/4 counters; the rest started as garbage. */
    cudaMemset(freq_dev, 0, N*sizeof(int));

    cudaEventRecord(total_start);

    /* Copy buffer from host memory to device memory */
    cudaMemcpy(buff_dev, buffer, sizeof(char)*file_size, cudaMemcpyHostToDevice);

    /* Create sufficient blocks */
    blocks = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
    /* Calculate the total thread count implied by that launch geometry. */
    num_threads = blocks * THREADS_PER_BLOCK;

    cudaEventRecord(comp_start);
    /*
     * Kernel call.
     * BUGFIX: the launch was <<<blocks*2, N>>> (256 threads) while the
     * kernel strided by total_threads = 512, so half of the file's
     * characters were never counted.  The geometry now matches num_threads.
     */
    count_characters<<< blocks, THREADS_PER_BLOCK >>>(buff_dev, freq_dev, file_size, num_threads);
    cudaEventRecord(comp_stop);
    cudaEventSynchronize(comp_stop);
    cudaEventElapsedTime(&comp_time, comp_start, comp_stop);

    /* Copy counters from device memory back to host memory */
    cudaMemcpy(freq, freq_dev, N*sizeof(int), cudaMemcpyDeviceToHost);

    cudaEventRecord(total_stop);
    cudaEventSynchronize(total_stop);
    cudaEventElapsedTime(&total_time, total_start, total_stop);

    /* Free memory on device */
    cudaFree(buff_dev);
    cudaFree(freq_dev);
    cudaEventDestroy(comp_start);
    cudaEventDestroy(comp_stop);
    cudaEventDestroy(total_start);
    cudaEventDestroy(total_stop);

    /* Display Results */
    display_count(freq, N);

    /* GPU timing */
    printf("N: %d, blocks: %d, total_threads: %d\n", N, blocks, THREADS_PER_BLOCK*blocks);
    printf("Total time (ms): %f\n", total_time);
    printf("Kernel time (ms): %f\n", comp_time);
    printf("Data transfer time (ms): %f\n", total_time-comp_time);

    free(buffer);
    free(freq);
    return 0;
}

/*
 * Function: count_characters
 * --------------------
 * Counts the frequency of each character (atomic increments into freq).
 *
 * buffer: pointer to char array that contains the file's bytes
 * freq: pointer to int array of N counters
 * file_size: the size of the file (long number)
 * total_threads: total threads launched; used as the grid stride
 */
__global__ void count_characters(char *buffer, int *freq, long file_size, int total_threads){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    long i;
    for (i=index; i<file_size; i+=total_threads) {
        /* BUGFIX: char may be signed, so bytes >= 128 produced a NEGATIVE
           index; also guard against indices beyond the N-slot table. */
        int c = (int)(unsigned char)buffer[i] - base;
        if (c >= 0 && c < N)
            atomicAdd(&(freq[c]), 1);
    }
}

void display_count(int *freq, int n){
    int j;
    for (j=0; j<n; ++j)
        (void) printf("%d = %d\n", j+base, freq[j]);
}
11,791
#include "FloatMatrix.cuh" #include <stdio.h> #include "cuda_header.cuh" FloatMatrix::FloatMatrix(long width, long height, FLOAT_MAT_TYPE* data) :Matrix(width, height) { this->data = data; } FLOAT_MAT_TYPE * FloatMatrix::getData() { return data; } FloatMatrix FloatMatrix::operator*(FloatMatrix& b) { FLOAT_MAT_TYPE* data = cublas_mul_matrix(this->getWidth(), this->getHeight(), this->getData(), b.getWidth(), b.getHeight(), b.getData()); return FloatMatrix(b.getWidth(), this->getHeight(), data); } bool FloatMatrix::operator==(FloatMatrix& b) { if (this->getWidth() != b.getWidth() || this->getHeight() != b.getHeight()) return false; for (unsigned long int i = 0; i < this->getWidth() * this->getHeight(); i++) if (this->data[i] != b.getData()[i]) { printf("Expected %f, got %f", data[i], b.getData()[i]); return false; } return true; } FloatMatrix::~FloatMatrix() { if (this->data) delete this->data; }
11,792
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <chrono>
using namespace std;

// Deliberately SERIAL GPU kernel: a single thread adds the whole
// 1M-element arrays, for a one-GPU-thread vs one-CPU-thread comparison
// (see the message printed by main).
// NOTE(review): the 1000000 bound is hard-coded and must match `n` in main.
__global__ void add(int *a, int *b)
{
    for(int i = 0; i < 1000000; i++)
        a[i] += b[i];
}

// CPU counterpart of the kernel above.
void addCPU(int *a, int *b)
{
    for(int i = 0; i < 1000000; i++)
        a[i] += b[i];
}

// Times add<<<1,1>>> against addCPU on random data and prints the ratio.
// NOTE(review): host and device buffers are never freed, and the GPU
// result is never copied back or validated.
int main()
{
    std::chrono::_V2::system_clock::time_point checkpoint[10];
    int n = 1000000;
    int *a = (int *)malloc(n * sizeof(int));
    int *b = (int *)malloc(n * sizeof(int));
    int *da, *db;
    srand(time(NULL));
    for(int i = 0; i < n; i++)
    {
        a[i] = rand() % 152;
        b[i] = rand() % 314;
    }
    cudaMalloc(&da, sizeof(int) * n);
    cudaMalloc(&db, sizeof(int) * n);
    cudaMemcpy(da, a, sizeof(int) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, sizeof(int) * n, cudaMemcpyHostToDevice);
    // Time the single-thread GPU pass (synchronize so the wall-clock
    // measurement covers the whole kernel).
    checkpoint[0] = std::chrono::high_resolution_clock::now();
    add<<<1,1>>>(da, db);
    cudaDeviceSynchronize();
    checkpoint[1] = std::chrono::high_resolution_clock::now();
    // Time the single-thread CPU pass.
    addCPU(a, b);
    checkpoint[2] = std::chrono::high_resolution_clock::now();
    auto x = std::chrono::duration_cast<std::chrono::microseconds>(checkpoint[1] - checkpoint[0]).count();
    auto y = std::chrono::duration_cast<std::chrono::microseconds>(checkpoint[2] - checkpoint[1]).count();
    cout << "Your single CPU thread is "<<((float)x)/y<<" times faster than your single GPU thread."<< endl;
    return 0;
}
11,793
#include "includes.h" __global__ void max_abs_diff(float* diff, const float* output1, const float* output2, const int size) { extern __shared__ float s_max[]; int i = blockDim.x*blockIdx.x + threadIdx.x; int tx = threadIdx.x; if (i<size) { float o1 = output1[i]; if (o1 == -1) { s_max[tx] = -1; } else { s_max[tx] = fabsf(o1 - output2[i]); } } else { s_max[tx] = -1; } for (int j = blockDim.x / 2; j> 0; j >>= 1) { __syncthreads(); if (tx<j) { s_max[tx] = fmaxf(s_max[tx], s_max[tx + j]); } } if (tx == 0) { diff[blockIdx.x] = s_max[0]; } }
11,794
extern "C" __global__ void kNMLUpdate1_kernel( int numAtoms, int paddedNumAtoms, float tau, float dt, float kT, float4 *posq, float4 *posqP, float4 *velm, long long *force, const float4 *__restrict__ random, unsigned int randomIndex ) { /* Update the velocity.*/ const float vscale = exp( -dt / tau ); const float fscale = ( 1.0f - vscale ) * tau; const float noisescale = sqrt( kT * ( 1 - vscale * vscale ) ); for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) { const float4 n = random[randomIndex + blockIdx.x * blockDim.x + threadIdx.x]; const float4 randomNoise = make_float4( n.x * noisescale, n.y * noisescale, n.z * noisescale, n.w * noisescale ); const float sqrtInvMass = sqrt( velm[atom].w ); float4 v = velm[atom]; float fx = ( float )force[atom] / ( float )0x100000000; float fy = ( float )force[atom + 1 * paddedNumAtoms] / ( float )0x100000000; float fz = ( float )force[atom + 2 * paddedNumAtoms] / ( float )0x100000000; v.x = ( vscale * v.x ) + ( fscale * fx * v.w ) + ( randomNoise.x * sqrtInvMass ); v.y = ( vscale * v.y ) + ( fscale * fy * v.w ) + ( randomNoise.y * sqrtInvMass ); v.z = ( vscale * v.z ) + ( fscale * fz * v.w ) + ( randomNoise.z * sqrtInvMass ); velm[atom] = v; } } extern "C" __global__ void kNMLUpdate2_kernel( int numAtoms, int numModes, float4 *velm, float4 *modes, float *modeWeights ) { extern __shared__ float dotBuffer[]; for( int mode = blockIdx.x; mode < numModes; mode += gridDim.x ) { /* Compute the projection of the mass weighted velocity onto one normal mode vector. 
*/ float dot = 0.0f; for( int atom = threadIdx.x; atom < numAtoms; atom += blockDim.x ) { const int modePos = mode * numAtoms + atom; const float scale = 1.0f / sqrt( velm[atom].w ); float4 v = velm[atom]; float4 m = modes[modePos]; dot += scale * ( v.x * m.x + v.y * m.y + v.z * m.z ); } dotBuffer[threadIdx.x] = dot; __syncthreads(); if( threadIdx.x == 0 ) { float sum = 0; for( int i = 0; i < blockDim.x; i++ ) { sum += dotBuffer[i]; } modeWeights[mode] = sum; } } } extern "C" __global__ void kNMLUpdate3_kernel( int numAtoms, int numModes, float dt, float4 *posq, float4 *velm, float4 *modes, float *modeWeights, float4 *noiseVal ) { /* Load the weights into shared memory. */ extern __shared__ float weightBuffer[]; for( int mode = threadIdx.x; mode < numModes; mode += blockDim.x ) { weightBuffer[mode] = modeWeights[mode]; } __syncthreads(); /* Compute the projected velocities and update the atom positions. */ for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) { const float invMass = velm[atom].w, scale = sqrt( invMass ); float3 v = make_float3( 0.0f, 0.0f, 0.0f ); for( int mode = 0; mode < numModes; mode++ ) { float4 m = modes[mode * numAtoms + atom]; float weight = weightBuffer[mode]; v.x += m.x * weight; v.y += m.y * weight; v.z += m.z * weight; } v.x *= scale; v.y *= scale; v.z *= scale; velm[atom] = make_float4( v.x, v.y, v.z, invMass ); float4 pos = posq[atom]; /* Add Step */ pos.x += dt * v.x; pos.y += dt * v.y; pos.z += dt * v.z; #ifdef FAST_NOISE /* Remove Noise */ pos.x -= noiseVal[atom].x; pos.y -= noiseVal[atom].y; pos.z -= noiseVal[atom].z; #endif posq[atom] = pos; } }
11,795
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <cstring>
#include "tree_huff.cuh"

#define FILE_NAME_MAX_LEN 270
#define MAX_FILE_NAME 256
#define N 32
#define M 4
#define MAX_BLOCKS 65535
#define THREADS_PER_BLOCK 128
#define WARP_SIZE 32

/* Writes a magic number so a reader can verify the file was produced by this program. */
void add_magic_num(FILE *file);
/* Writes a 256-bit vector marking which byte values are present in the input (32 bytes). */
void add_bit_vector(FILE *file, unsigned char bit_vector[32]);
/* Determines (and writes) how many bytes are needed to represent the largest character frequency. */
int add_size(FILE *file, int freq[MAX_CHARS]);
/* Writes the header containing the frequency of each character present. */
void add_character_counts(FILE *file, int freq[MAX_CHARS], int num_bytes);

/* strcpy replacement usable from device code.
   NOTE(review): does not copy the terminating NUL — intentional here, since each
   thread writes only its code's '0'/'1' characters into a shared output buffer. */
__device__ char * my_strcpy(char *dest, const char *src){
	int i = 0;
	while (src[i] != 0) {
		dest[i] = src[i];
		i++;
	}
	return dest;
}

/* Kernel that writes, for every input byte, its Huffman code as a run of '0'/'1'
   characters at the byte's exclusive-scan offset in `res`.  Threads write
   disjoint ranges, so no synchronization is needed.
   NOTE(review): `string[i]` is a (possibly signed) char; for input bytes >= 128
   the index into code_values would be negative — confirm input is 7-bit ASCII
   or that char is unsigned on the target platform. */
__global__ void encode_byte_stream(char * string, code * code_values, char * res, int * offset, int f_size) {
	int i = blockIdx.x *blockDim.x + threadIdx.x;
	if(i < f_size) {
		my_strcpy(res + offset[i], code_values[string[i]].path);
		//printf("hola\n");
	}
}

/* Kernel that packs 8 '0'/'1' characters into one output byte (MSB first). */
__global__ void compressed_bit_stream(char * encoded_byte_stream, unsigned char * res, int f_size) {
	int i = blockIdx.x *blockDim.x + threadIdx.x;
	if (i < f_size) {
		unsigned char packed = 0;
		for (int count = 0; count < 8; count++) {
			if (encoded_byte_stream[(i*8)+count] == '0') {
				packed <<= 1;
			} else {
				packed = (packed << 1) | 0x01;
			}
		}
		res[i] = packed;
	}
}

/* Huffman-compresses the file named in argv[1] into "<name>.huff":
   1. host: count byte frequencies, write header (magic, bit vector, counts);
   2. host: build the Huffman tree and per-character codes (tree_huff.cuh);
   3. device: expand every byte to its code string (encode_byte_stream), using a
      thrust exclusive scan of the code lengths for the per-byte offsets;
   4. device: pack the '0'/'1' stream into bytes (compressed_bit_stream);
   5. host: append the packed stream to the output file. */
int main(int argc, char *argv[]) {
	/* files */
	FILE *file_in, *file_out;
	int freq[MAX_CHARS] = {0}, c = 0, count = 0, num_bytes = 0, ret = 0;
	unsigned char bit_vector[32] = {0x00}, packed = 0;
	char output_file_name[MAX_FILE_NAME] = "", s[MAX_PATH] = "", temp[2*MAX_PATH] = "";
	struct node *tree_head = NULL;
	struct code code_values[MAX_CHARS] = {{-1, {0}, 0}};
	char* texto;
	size_t texto_s;
	long f_size;
	/* check that an input file was given */
	if (argc != 2) {
		printf("Formato: ./huffman filename\n");
		exit(1);
	}
	/* open the input file */
	if ((file_in = fopen(argv[1], "r")) == NULL) {
		printf("No se pudo abrir el archivo.\n");
		exit(1);
	}
	/* count the frequency of each character */
	while ((c = fgetc(file_in)) != EOF) {
		freq[c]++;
	}
	/* close the input file */
	fclose(file_in);
	/* mark each present character in the 256-bit presence vector */
	for (count = 0; count < MAX_CHARS; count++) {
		if (freq[count] > 0) {
			bit_vector[count / 8] |= (1 << (count % 8));
		}
	}
	if ((strlen(argv[1])+strlen(".huff")) >= MAX_FILE_NAME) {
		printf("Input file name too long. Output file cannot be generated.\n");
		exit(1);
	}
	/* build the output file name: "<input>.huff" */
	strncpy(output_file_name, argv[1], MAX_FILE_NAME);
	strncat(output_file_name, ".huff", MAX_FILE_NAME);
	/* open the output file */
	if ((file_out = fopen(output_file_name, "w")) == NULL) {
		printf("Output file failed to open.\n");
		exit(1);
	}
	/* write the header */
	add_magic_num(file_out);
	add_bit_vector(file_out, bit_vector);
	num_bytes = add_size(file_out, freq);
	add_character_counts(file_out, freq, num_bytes);
	/* build the Huffman tree */
	tree_head = generate_tree(freq);
	/* generate the Huffman codes */
	build_codes(tree_head, code_values, s, 0);
	/* reopen the input file */
	if ((file_in = fopen(argv[1], "r")) == NULL) {
		printf("Failed to open the input file.\n");
		exit(1);
	}
	/* events for timing */
	cudaEvent_t cpuI, cpuF;
	float cpuT;
	cudaEventCreate( &cpuI );
	cudaEventCreate( &cpuF );
	cudaEventRecord( cpuI, 0 );
	/* find the input file size */
	fseek(file_in, 0, SEEK_END);
	f_size = ftell(file_in);
	fseek(file_in, 0, SEEK_SET);
	/* buffer holding the whole input file in memory */
	char *string = (char *)malloc(sizeof(char) * f_size);
	/* per-byte code length in bits, taken from the Huffman table; turned into
	   per-byte output offsets by the exclusive scan below */
	int * offset = (int *)malloc(sizeof(int) * f_size);
	int i = 0;
	/* fill both arrays */
	while ((c = fgetc(file_in)) != EOF) {
		offset[i] = code_values[c].len;
		string[i] = c;
		i++;
	}
	char *d_string;
	/* allocate device memory and upload the input */
	cudaMalloc<char>(&d_string, sizeof(char) * f_size);
	cudaMemcpy(d_string, string, f_size*sizeof(char), cudaMemcpyHostToDevice );
	thrust::device_vector<int> d_offset(offset, offset+f_size);
	/* parallel prefix sum of the code lengths */
	thrust::exclusive_scan(d_offset.begin(), d_offset.end(), d_offset.begin()); // in-place scan
	/* copy the offsets back to the host */
	thrust::copy(d_offset.begin(), d_offset.end(), offset);
	/* total bit length = last offset + last code length.
	   NOTE(review): an empty input file (f_size == 0) makes these indices
	   negative — worth a guard. */
	int last = offset[f_size - 1] + code_values[string[f_size-1]].len;
	/* pad to a whole number of bytes */
	if (last%8 != 0) {
		last += 8-(last%8);
	}
	/* device buffer for the '0'/'1' stream; '0'-filled so the padding bits are zeros */
	char * d_encoded_byte_stream;
	char * encoded_byte_stream = (char *)malloc(sizeof(char) * last);
	cudaMalloc<char>(&d_encoded_byte_stream, sizeof(char)*last);
	cudaMemset(d_encoded_byte_stream, '0', sizeof(char)*last);
	code * d_code_values;
	cudaMalloc<code>(&d_code_values, sizeof(code)*MAX_CHARS);
	cudaMemcpy(d_code_values, code_values, MAX_CHARS*sizeof(code), cudaMemcpyHostToDevice );
	/* raw pointer out of the thrust::device_vector for the kernel launch */
	int * d_offset2 = thrust::raw_pointer_cast( &d_offset[0] );
	//thrust::device_delete(d_offset2);
	int blocks;
	blocks = ceil((float)f_size/THREADS_PER_BLOCK);
	printf("blocks: %d\n", blocks);
	encode_byte_stream<<<blocks,THREADS_PER_BLOCK>>>(d_string, d_code_values, d_encoded_byte_stream, d_offset2, f_size);
	cudaFree(d_string);
	cudaFree(d_code_values);
	//cudaFree(d_offset2);
	/* copy the encoded stream back to the host (blocking; also synchronizes the kernel) */
	cudaMemcpy(encoded_byte_stream, d_encoded_byte_stream, last*sizeof(char), cudaMemcpyDeviceToHost);
	int finalSize = last/8;
	/* device buffer for the packed bits */
	unsigned char * d_encoded_bit_stream;
	/* host buffer for the packed bits */
	unsigned char * encoded_bit_stream = (unsigned char *)malloc(finalSize * sizeof(unsigned char));
	cudaMalloc<unsigned char>(&d_encoded_bit_stream, sizeof(unsigned char)*finalSize);
	blocks = ceil((float)finalSize/THREADS_PER_BLOCK); /* rounds up to the next multiple of 128 */
	compressed_bit_stream<<<blocks, THREADS_PER_BLOCK>>>(d_encoded_byte_stream, d_encoded_bit_stream, finalSize);
	cudaMemcpy(encoded_bit_stream, d_encoded_bit_stream, finalSize*sizeof(unsigned char), cudaMemcpyDeviceToHost);
	/* write the compressed payload to the output file */
	fwrite(encoded_bit_stream, sizeof(unsigned char), finalSize, file_out);
	/* stop the timer */
	cudaEventRecord( cpuF, 0 );
	cudaEventSynchronize( cpuF );
	cudaEventElapsedTime( &cpuT, cpuI, cpuF);
	printf("Tiempo %f: ", cpuT);
	/* FREES */
	cudaFree(d_encoded_byte_stream);
	cudaFree(d_encoded_bit_stream);
	free(encoded_bit_stream);
	free(encoded_byte_stream);
	free(string);
	free(offset);
	/* close the input file */
	if ((ret = fclose(file_in)) != 0) {
		printf("Failed to close the input file.");
	}
	/* close the output file */
	if ((ret = fclose(file_out)) != 0) {
		printf("Failed to close the output file.");
	}
	free_tree(tree_head);
	return 0;
}

/* Writes the 4-byte magic number identifying files produced by this program. */
void add_magic_num(FILE *file) {
	unsigned char magic_num[4] = {0x4C,0x70,0xF0,0x7C}; /* magic number */
	int i = 0, ret = 0;
	for (i = 0; i < 4; i++) {
		if ((ret = fprintf(file, "%c", magic_num[i])) != 1) {
			printf("Failure to add magic number to output file.\n"); /* could not write to the file */
			exit(1);
		}
	}
	return;
}

/* Writes the 32-byte presence vector, reversing the bit order within each byte. */
void add_bit_vector(FILE *file, unsigned char bit_vector[32]) {
	unsigned char c = 0;
	int i = 0, ret = 0;
	for (i = 0; i < 32; i++) {
		/* mirror the 8 bits of bit_vector[i] into c */
		c = 0x00;
		c |= ((bit_vector[i] & 0x01) << 7);
		c |= ((bit_vector[i] & 0x02) << 5);
		c |= ((bit_vector[i] & 0x04) << 3);
		c |= ((bit_vector[i] & 0x08) << 1);
		c |= ((bit_vector[i] & 0x10) >> 1);
		c |= ((bit_vector[i] & 0x20) >> 3);
		c |= ((bit_vector[i] & 0x40) >> 5);
		c |= ((bit_vector[i] & 0x80) >> 7);
		if ((ret = fprintf(file,"%c", c)) != 1) {
			printf("Failure to output bit vector number.\n");
			exit(1);
		}
	}
	return;
}

/* Determines how many bytes (1-4) are needed to store the largest frequency,
   writes that count as one byte, and returns it. */
int add_size(FILE *file, int freq[MAX_CHARS]) {
	int i = 0, num_bytes = 0, ret = 0;
	for (i = 0; i < MAX_CHARS; i++) {
		/* keep the maximum byte width seen so far */
		if (freq[i] & 0xFF000000) {
			num_bytes = 4;
		} else if ((freq[i] & 0x00FF0000) && num_bytes < 4) {
			num_bytes = 3;
		} else if ((freq[i] & 0x0000FF00) && num_bytes < 3) {
			num_bytes = 2;
		} else if ((freq[i] & 0x000000FF) && num_bytes < 2) {
			num_bytes = 1;
		}
	}
	if ((ret = fprintf(file, "%c", num_bytes)) != 1) {
		printf("Failure to output size of the frequency byte number.\n");
		exit(1);
	}
	return num_bytes;
}

/* Writes the frequency of every present character, num_bytes bytes each,
   most significant byte first (assumes a little-endian host, since it walks
   the int's bytes from index num_bytes-1 down to 0 — TODO confirm). */
void add_character_counts(FILE *file, int freq[MAX_CHARS], int num_bytes) {
	char *ptr = 0;
	int i = 0, ret = 0, j = 0;
	for (i = 0; i < MAX_CHARS; i++) {
		if (freq[i] == 0) {
			continue;
		}
		ptr = (char *)&freq[i];
		for (j = (num_bytes - 1); j >= 0 ; j--) {
			if ((ret = fprintf(file, "%c", ptr[j])) != 1) {
				printf("Failure to output the freqency byte.\n");
				exit(1);
			}
		}
	}
	return;
}
11,796
#include "stdio.h"
#include<iostream>
#include <cuda.h>
#include <cuda_runtime.h>

// Two integer coefficients placed in constant memory with the __constant__
// qualifier; they are filled from the host via cudaMemcpyToSymbol below.
__constant__ int constant_f;
__constant__ int constant_g;

// Number of elements processed (one thread per element).
#define N 5

// Evaluates out[i] = f * in[i] + g for the element owned by this thread.
// The coefficients live in constant memory, which is broadcast efficiently
// when every thread reads the same address.
__global__ void gpu_constant_memory(float *device_in, float *device_out) {
	device_out[threadIdx.x] = constant_f * device_in[threadIdx.x] + constant_g;
}

int main(int argc, char *argv[]) {
	// Host-side input/output arrays.
	float inputHost[N], outputHost[N];
	// Device-side buffers.
	float *inputDev, *outputDev;
	// Coefficient values to upload into constant memory.
	int coeffF = 2;
	int coeffG = 20;

	// Allocate the device buffers.
	cudaMalloc((void**)&inputDev, N * sizeof(float));
	cudaMalloc((void**)&outputDev, N * sizeof(float));

	// Fill the host input with 0..N-1 and push it to the device.
	for (int idx = 0; idx < N; idx++)
		inputHost[idx] = idx;
	cudaMemcpy(inputDev, inputHost, N * sizeof(float), cudaMemcpyHostToDevice);

	// Upload the coefficients into constant memory before the kernel launch.
	// cudaMemcpyToSymbol is the API for writing __constant__ variables.
	cudaMemcpyToSymbol(constant_f, &coeffF, sizeof(int), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(constant_g, &coeffG, sizeof(int), 0, cudaMemcpyHostToDevice);

	// Launch one block with N threads, one thread per element.
	gpu_constant_memory <<<1, N >>> (inputDev, outputDev);

	// Blocking copy of the result back to the host (also synchronizes).
	cudaMemcpy(outputHost, outputDev, N * sizeof(float), cudaMemcpyDeviceToHost);

	// Report the results.
	printf("Use of Constant memory on GPU \n");
	for (int idx = 0; idx < N; idx++)
		printf("The expression for input %f is %f\n", inputHost[idx], outputHost[idx]);

	// Release device memory.
	cudaFree(inputDev);
	cudaFree(outputDev);
	return 0;
}
11,797
#include "includes.h"

// Naive grey-level histogram over a height x width image stored row-major in
// Source.  One thread per pixel (2D launch); each thread atomically bumps the
// bin for its pixel value, so `res` must provide one bin per possible u_char
// value (256) and should be zeroed before the launch.
__global__ void gpu_histo_kernel_naive(u_char* Source, int *res, unsigned height, unsigned width){
    // 2D global pixel coordinates: j = column, i = row.
    int j = blockIdx.x*blockDim.x + threadIdx.x;
    int i = blockIdx.y*blockDim.y + threadIdx.y;
    // Guard the grid tail.  i and j are built from unsigned built-ins and can
    // never be negative, so the original `i < 0 || j < 0` tests were dead code
    // and the empty-then/else shape is flattened into one positive guard.
    // The casts reproduce the original signed/unsigned comparison exactly.
    if ((unsigned)i < height && (unsigned)j < width) {
        u_char val = Source[i*width+j];
        // One contended atomic per pixel — correct but slow; a privatized
        // shared-memory histogram per block would reduce contention.
        atomicAdd(&res[val],1);
    }
}
11,798
/**
 * demonstration of asynchronous program execution:
 * the host printf runs immediately, the kernel launch returns without
 * waiting, and the device message only appears once the host synchronizes.
 */
#include <stdio.h>

// Device kernel that prints a message via device-side printf.
// NOTE(review): the name `myhost` is misleading for device code, but it is
// kept so existing call sites keep working.
__global__ void myhost(void) {
    // Fixed: this runs on the device, so the message must not claim to come
    // from the host — that defeated the point of the demonstration.
    printf("Hello World from the device\n");
}

int main(void) {
    // Host message — printed immediately.
    printf("Hello World! 123\n");

    // Asynchronous launch: control returns to the host right away.
    myhost<<<1,1>>>();

    // Without this barrier the process could exit before the device's
    // printf buffer is flushed, and the kernel's message would be lost.
    cudaDeviceSynchronize();
    return 0;
}
11,799
/* FMA throughput microbenchmark.
 * Each thread runs 2048 iterations over 16 independent update chains
 * (c[i] += scale[i] * c[i]); the chains carry no dependences on each other,
 * giving the instruction-level parallelism needed to keep the FMA pipelines
 * full.  The final sum is stored to out[index] so the compiler cannot remove
 * the work as dead code.
 * NOTE(review): whether the scale[0..15] loads are hoisted out of the loop
 * depends on the compiler's aliasing analysis (out and scale are both plain
 * float*); confirm with the generated SASS if exact FLOP counts matter. */
__global__ void fma_throughput(float *out, float *scale) {
    const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    /* 16 independent accumulators, initialized 0..15. */
    float c[16];
    for (int i = 0; i < 16; ++i) {
        c[i] = i;
    }
    for (int i = 0; i < 2048; ++i) {
        c[0] += scale[0] * c[0];
        c[1] += scale[1] * c[1];
        c[2] += scale[2] * c[2];
        c[3] += scale[3] * c[3];
        c[4] += scale[4] * c[4];
        c[5] += scale[5] * c[5];
        c[6] += scale[6] * c[6];
        c[7] += scale[7] * c[7];
        c[8] += scale[8] * c[8];
        c[9] += scale[9] * c[9];
        c[10] += scale[10] * c[10];
        c[11] += scale[11] * c[11];
        c[12] += scale[12] * c[12];
        c[13] += scale[13] * c[13];
        c[14] += scale[14] * c[14];
        c[15] += scale[15] * c[15];
    }
    /* Fold all chains into one observable result. */
    out[index] = c[0] + c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10] + c[11] + c[12] + c[13] + c[14] + c[15];
}

/* FMA latency microbenchmark.
 * A single serially dependent chain: every iteration's c += scale * c needs
 * the previous iteration's result, so the loop runs at one FMA per FMA
 * latency rather than per issue slot.  1024*1024*1024 = 2^30 iterations
 * (fits in a signed int).  `scale` is passed by value, so there are no
 * memory loads inside the loop; the store to out[index] keeps the chain
 * from being optimized away. */
__global__ void fma_latency(float *out, float scale) {
    const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    float c = 1.0f;
    for (int i = 0; i < 1024 * 1024 * 1024; ++i) {
        c += scale * c;
    }
    out[index] = c;
}
11,800
#include "includes.h"

// CNN inference, fully-connected stage.  The network (trained in Keras for 10
// epochs; 99.70% train / 99.14% test accuracy on 28x28 greyscale digits) is:
//   conv 32x3x3 -> maxpool 2x2 -> conv 64x3x3 -> maxpool 2x2 -> flatten
//   -> dense(1024) -> dropout (training only) -> dense(10).
// All arrays and matrices in this implementation are row-major.

// Dense (fully connected) layer: one thread per output node.
//   num_input   - number of input activations in gpu_in
//   num_output  - number of output nodes this layer produces
//   weights     - row-major, num_output rows of num_input weights; Keras
//                 exports all weights feeding one output node contiguously
//   biases      - one bias per output node
//   num_classes - size of the final classifier layer; when num_output equals
//                 it this is the last layer and no activation is applied,
//                 otherwise ReLU (max(0, x)) is used.
__global__ void dense_kernel(int num_input, int num_output, double* gpu_in, double* weights, double* biases, double* gpu_out, int num_classes) {
    const int tid = blockDim.x*blockIdx.x + threadIdx.x;

    // Threads past the last output node have nothing to do.
    if (tid >= num_output) return;

    // Walk this node's contiguous weight row while accumulating the dot
    // product with the input vector.
    const double *row = weights + tid*num_input;
    double acc = 0.0;
    for (int k = 0; k < num_input; k++) {
        acc += gpu_in[k] * row[k];
    }
    acc += biases[tid];

    // ReLU on hidden layers only; the final layer stays linear.
    if (num_output != num_classes && acc < 0.0) {
        acc = 0.0;
    }

    gpu_out[tid] = acc;
}