serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
3,701
#include <stdio.h>
#include <stdlib.h>
#define N 4

/*
 * Demonstrates that a 2D C array is stored in row-major order by walking
 * its N*N elements through three equivalent flat int* views of the array.
 */
int main(void)
{
    int arr[N][N] = {{1, 2, 3, 4},
                     {5, 6, 7, 8},
                     {9, 10, 11, 12},
                     {13, 14, 15, 16}};

    printf("Original 2D array: \n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%d ", arr[i][j]);
        }
        printf("\n");
    }

    printf("\n Row-major layout: \n");

    /* Approach 1: the first row arr[0] decays to int*. */
    int *p1 = arr[0];
    /* %p requires a void* argument; passing int* unconverted is undefined
     * behavior per the C standard, so each pointer is cast explicitly. */
    printf("Approach 1: (address:%p)\n", (void *)p1);
    for (int j = 0; j < N * N; j++) {
        printf("%d ", *(p1 + j));
    }
    printf("\n");

    /* Approach 2: *arr is arr[0], which likewise decays to int*. */
    int *p2 = *arr;
    printf("Approach 2: (address:%p)\n", (void *)p2);
    for (int j = 0; j < N * N; j++) {
        printf("%d ", *(p2 + j));
    }
    printf("\n");

    /* Approach 3: the address of the very first element. */
    int *p3 = &arr[0][0];
    printf("Approach 3: (address:%p)\n", (void *)p3);
    for (int j = 0; j < N * N; j++) {
        printf("%d ", *(p3 + j));
    }
    return 0;
}
3,702
/* -------------------------------------------------------------------- OPTIMIZED CODE MAKING USE OF REGISTERS + SHARED MEMORY ----------------------------------------------------------------------*/ #include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void heat (float * __restrict__ in, float * __restrict__ out1, int L, int M, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i-4); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j-4); int j = max (j0, 0) + (int)(threadIdx.y); //Declarations float reg_in_m1=0, __shared__ sh_in_c0[16][32], reg_in_p1=0; float reg_out_m2=0, __shared__ sh_out_m1[16][32], reg_out_c0=0; //Value Initialization if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) { reg_in_m1 = in[0 + j*N + i]; sh_in_c0[j-j0][i-i0] = in[1*M*N + j*N + i]; } //Rest of the computation for (int k=1; k<=L-2; ++k) { //Fetch new plane if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) { reg_in_p1 = in[(k+1)*M*N + j*N + i]; } __syncthreads (); if (j >= max (j0+1, 1) & j <= min (j0+blockdim_j-2, M-2) & i >= max (i0+1, 1) & i <= min (i0+blockdim_i-2, N-2)) { reg_out_c0 = ((((0.125f * ((reg_in_p1 - (2.0f * sh_in_c0[j-j0][i-i0])) + reg_in_m1)) + (0.125f * ((sh_in_c0[j-j0+1][i-i0] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0-1][i-i0]))) + (0.125f * ((sh_in_c0[j-j0][i-i0+1] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0][i-i0-1]))) + sh_in_c0[j-j0][i-i0]); } if (j >= max (j0+2, 1) & j <= min (j0+blockdim_j-3, M-2) & i >= max (i0+2, 1) & i <= min (i0+blockdim_i-3, N-2)) { 
out1[max(k-1,0)*M*N + j*N + i] = ((((0.125f * ((reg_out_c0 - (2.0f * sh_out_m1[j-j0][i-i0])) + reg_out_m2)) + (0.125f * ((sh_out_m1[j-j0+1][i-i0] - (2.0f * sh_out_m1[j-j0][i-i0])) + sh_out_m1[j-j0-1][i-i0]))) + (0.125f * ((sh_out_m1[j-j0][i-i0+1] - (2.0f * sh_out_m1[j-j0][i-i0])) + sh_out_m1[j-j0][i-i0-1]))) + sh_out_m1[j-j0][i-i0]); } __syncthreads (); //Value rotation if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) { reg_in_m1 = sh_in_c0[j-j0][i-i0]; sh_in_c0[j-j0][i-i0] = reg_in_p1; reg_out_m2 = sh_out_m1[j-j0][i-i0]; sh_out_m1[j-j0][i-i0] = reg_out_c0; } __syncthreads (); } } extern "C" void host_code (float *h_in, float *h_out2, int L, int M, int N) { float *in; cudaMalloc (&in, sizeof(float)*L*M*N); check_error ("Failed to allocate device memory for in\n"); cudaMemcpy (in, h_in, sizeof(float)*L*M*N, cudaMemcpyHostToDevice); float *out1; cudaMalloc (&out1, sizeof(float)*L*M*N); check_error ("Failed to allocate device memory for out1\n"); float *out2; cudaMalloc (&out2, sizeof(float)*L*M*N); check_error ("Failed to allocate device memory for out2\n"); dim3 blockconfig_1 (32, 16, 1); dim3 gridconfig_1 (ceil(N, blockconfig_1.x-4), ceil(M, blockconfig_1.y-4), 1); heat <<<gridconfig_1, blockconfig_1>>> (in, out1, L, M, N); dim3 blockconfig_2 (32, 16, 1); dim3 gridconfig_2 (ceil(N, blockconfig_2.x-4), ceil(M, blockconfig_2.y-4), 1); heat <<<gridconfig_2, blockconfig_2>>> (out1, out2, L, M, N); cudaMemcpy (h_out2, out2, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost); }
3,703
/** * APPROXIMATE PATTERN MATCHING * * INF560 */ #include <string.h> #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include <unistd.h> #include <sys/time.h> #define APM_DEBUG 0 #define CHECK(x) \ do { \ if (!(x)) { \ fprintf(stderr, "%s:%d: ", __func__, __LINE__); \ perror(#x); \ exit(EXIT_FAILURE); \ } \ } while (0) #define DIFFTEMPS(a,b) (((b).tv_sec - (a).tv_sec) + ((b).tv_usec - (a).tv_usec)/1000000.) char * read_input_file( char * filename, int * size ) { char * buf ; off_t fsize; int fd = 0 ; int n_bytes = 1 ; /* Open the text file */ fd = open( filename, O_RDONLY ) ; if ( fd == -1 ) { fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ; return NULL ; } /* Get the number of characters in the textfile */ fsize = lseek(fd, 0, SEEK_END); if ( fsize == -1 ) { fprintf( stderr, "Unable to lseek to the end\n" ) ; return NULL ; } #if APM_DEBUG printf( "File length: %lld\n", fsize ) ; #endif /* Go back to the beginning of the input file */ if ( lseek(fd, 0, SEEK_SET) == -1 ) { fprintf( stderr, "Unable to lseek to start\n" ) ; return NULL ; } /* Allocate data to copy the target text */ buf = (char *)malloc( fsize * sizeof ( char ) ) ; if ( buf == NULL ) { fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n", fsize ) ; return NULL ; } n_bytes = read( fd, buf, fsize ) ; if ( n_bytes != fsize ) { fprintf( stderr, "Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n", fsize, n_bytes) ; return NULL ; } #if APM_DEBUG printf( "Number of read bytes: %d\n", n_bytes ) ; #endif *size = n_bytes ; close( fd ) ; return buf ; } #define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? 
(b) : (c))) int levenshtein(char *s1, char *s2, int len, int * column) { unsigned int x, y, lastdiag, olddiag; for (y = 1; y <= len; y++) { column[y] = y; } for (x = 1; x <= len; x++) { column[0] = x; lastdiag = x-1 ; for (y = 1; y <= len; y++) { olddiag = column[y]; column[y] = MIN3( column[y] + 1, column[y-1] + 1, lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1) ); lastdiag = olddiag; } } return(column[len]); } __global__ void cuda_call( int n_bytes, char *buf, char *pattern, int batch_size, int size_pattern, int *matches, int approx_factor) { /* Traverse the input data up to the end of the file */ int tid = blockIdx.x * blockDim.x + threadIdx.x; int start = tid * batch_size; int end = start + batch_size; int *column; unsigned int j; if( end > n_bytes ) end = n_bytes; if ( start < n_bytes ){ column = (int *)malloc( (size_pattern+1) * sizeof( int ) ) ; for (j = start; j < end; j++ ){ int distance = 0 ; int size = size_pattern; unsigned int x, y, lastdiag, olddiag; if ( n_bytes - j < size_pattern ){ size = n_bytes - j ; } for (y = 1; y <= size; y++) column[y] = y; for (x = 1; x <= size; x++) { column[0] = x; lastdiag = x-1 ; for (y = 1; y <= size; y++) { olddiag = column[y]; column[y] = MIN3( column[y] + 1, column[y-1] + 1, lastdiag + (pattern[y-1] == buf[ j + x-1] ? 
0 : 1) ); lastdiag = olddiag; } } distance = column[size]; if ( distance <= approx_factor ) matches[tid]++ ; } // End for j } // End if start } int sum( int *matches, int size ){ int _sum = 0; for( int i = 0; i < size; i++ ) _sum += matches[i]; return _sum; } int main( int argc, char ** argv ) { char ** pattern ; char * filename ; int nb_patterns = 0 ; int i; char *buf, *dBuf; struct timeval t1, t2; double duration ; int n_bytes ; int * n_matches ; int approx_factor = 0; /* Check number of arguments */ if ( argc < 4 ) { printf( "Usage: %s approximation_factor " "dna_database pattern1 pattern2 ...\n", argv[0] ) ; return 1 ; } /* Get the distance factor */ approx_factor = atoi( argv[1] ) ; /* Grab the filename containing the target text */ filename = argv[2] ; /* Get the number of patterns that the user wants to search for */ nb_patterns = argc - 3 ; /* Fill the pattern array */ pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ; if ( pattern == NULL ) { fprintf( stderr, "Unable to allocate array of pattern of size %d\n", nb_patterns ) ; return 1 ; } /* Grab the patterns */ for ( i = 0 ; i < nb_patterns ; i++ ) { int l ; l = strlen(argv[i+3]) ; if ( l <= 0 ) { fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ; return 1 ; } pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ; if ( pattern[i] == NULL ) { fprintf( stderr, "Unable to allocate string of size %d\n", l ) ; return 1 ; } strncpy( pattern[i], argv[i+3], (l+1) ) ; } printf( "Approximate Pattern Mathing: " "looking for %d pattern(s) in file %s w/ distance of %d\n", nb_patterns, filename, approx_factor ) ; buf = read_input_file( filename, &n_bytes ) ; cudaMalloc(&dBuf, n_bytes); cudaMemcpy(dBuf, buf, n_bytes, cudaMemcpyHostToDevice); if ( buf == NULL ) { return 1 ; } printf("%d\n", n_bytes); /* Allocate the array of matches */ n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ; if ( n_matches == NULL ) { fprintf( stderr, "Error: unable to allocate memory for %ldB\n", nb_patterns * 
sizeof( int ) ) ; return 1 ; } /***** * BEGIN MAIN LOOP ******/ /* Timer start */ gettimeofday(&t1, NULL); /* Check each pattern one by one */ for ( i = 0 ; i < nb_patterns ; i++ ) { int size_pattern = strlen(pattern[i]) ; int *matches, *dMatches; int nb_threads = 1024, batch_size = 1000; int nb_blocks = (n_bytes / batch_size + nb_threads) / nb_threads; char *dPatern; n_matches[i] = 0 ; cudaMalloc(&dPatern, (size_pattern+1) * sizeof( char ) ); cudaMemcpy(dPatern, pattern[i], size_pattern+1, cudaMemcpyHostToDevice); matches = (int *)malloc( (nb_blocks*nb_threads) * sizeof( int ) ) ; for( int j = 0; j < (nb_blocks*nb_threads); j++ ) matches[j] = 0; cudaMalloc(&dMatches, (nb_blocks*nb_threads) * sizeof( int )); cudaMemcpy(dMatches, matches, (nb_blocks*nb_threads) * sizeof( int ), cudaMemcpyHostToDevice); cuda_call<<<nb_blocks, nb_threads>>>( n_bytes , dBuf, dPatern, batch_size, size_pattern, dMatches, approx_factor ); cudaDeviceSynchronize(); cudaMemcpy(matches, dMatches, (nb_blocks*nb_threads) * sizeof( int ), cudaMemcpyDeviceToHost); n_matches[i] = sum(matches,nb_blocks*nb_threads); free(matches); cudaFree( dPatern ); cudaFree( dMatches); } /* Timer stop */ gettimeofday(&t2, NULL); duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6); printf( "APM done in %lf s\n", duration ) ; /***** * END MAIN LOOP ******/ free(buf); cudaFree(dBuf); for ( i = 0 ; i < nb_patterns ; i++ ) { printf( "Number of matches for pattern <%s>: %d\n", pattern[i], n_matches[i] ) ; } return 0 ; }
3,704
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } void printDeviceNames() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Indeks urzadzenia: %d\n", i); printf("Nazwa urzadzenia: %s\n\n", prop.name); } } void printDevicePropertiesById(int id) { int nDevices, nProcs; cudaGetDeviceCount(&nDevices); if (id < nDevices) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, id); cudaDeviceGetAttribute(&nProcs, cudaDevAttrMultiProcessorCount, id); printf("Nazwa urzadzenia: %s\n", prop.name); printf("Ilosc multiprocesorow: %d.\n", nProcs); printf("Kompatybilnosc obliczeniowa: %d.%d.\n\n", prop.major, prop.minor); } else { printf("Urzadzenie o indeksie %d nie istnieje...\n", id); } } void reportGPUMemory() { size_t free, total; int freeMem, totalMem; cudaMemGetInfo(&free, &total); freeMem = static_cast<int>(free / 1048576); totalMem = static_cast<int>(total / 1048576); printf("---------- STAN PAMIECI ----------\n"); printf("Wolna: %d MB\nCalkowita: %d MB\nUzywana: %d MB\n\n", freeMem, totalMem, totalMem - freeMem); } void allocAndFreeMem() { reportGPUMemory(); printf("Alokowanie 16MB danych typu char...\n\n"); char* charData; cudaMallocManaged((void**)&charData, 1 << 24); reportGPUMemory(); printf("Alokowanie 128MB danych typu float...\n\n"); float* floatData; cudaMallocManaged((void**)&floatData, 1 << 27); reportGPUMemory(); printf("Zwolnienie pamieci danych typu char...\n"); cudaFree(charData); reportGPUMemory(); printf("Zwolnienie pamieci danych typu float...\n"); cudaFree(floatData); reportGPUMemory(); } void hostAndDeviceTransfer(float *cpuTimeGlobal, float *gpuTimeGlobal, int nElements) { cudaEvent_t startHostTransfer, stopHostTransfer; cudaEvent_t startDevTransfer, stopDevTransfer; float cpuTime = 0, gpuTime = 
0; int *deviceArray; int *hostArray = (int*)malloc(nElements * sizeof(int)); cudaMalloc((int**)&deviceArray, nElements * sizeof(int)); cudaEventCreate(&startDevTransfer); cudaEventCreate(&stopDevTransfer); cudaEventRecord(startDevTransfer); cudaError_t deviceToHost = cudaMemcpy(deviceArray, hostArray, nElements * sizeof(int), cudaMemcpyHostToDevice); cudaEventRecord(stopDevTransfer); cudaEventSynchronize(stopDevTransfer); if (deviceToHost != cudaSuccess) { printf("Nie udalo sie przekopiowac danych z GPU do CPU!\n"); } else { printf("Kopiowanie z GPU do CPU powiodlo sie!\n"); cudaEventElapsedTime(&gpuTime, startDevTransfer, stopDevTransfer); printf("Czas kopiowania danych: %fs\n\n", gpuTime); *gpuTimeGlobal = gpuTime; } cudaEventCreate(&startHostTransfer); cudaEventCreate(&stopHostTransfer); cudaEventRecord(startHostTransfer); cudaError_t hostToDevice = cudaMemcpy(hostArray, deviceArray, nElements * sizeof(int), cudaMemcpyDeviceToHost); cudaEventRecord(stopHostTransfer); cudaEventSynchronize(stopHostTransfer); if (hostToDevice != cudaSuccess) { printf("Nie udalo sie przekopiowac danych z CPU do GPU!\n"); } else { printf("Kopiowanie z CPU do GPU powiodlo sie!\n"); cudaEventElapsedTime(&cpuTime, startHostTransfer, stopHostTransfer); printf("Czas kopiowania danych: %fs\n\n", cpuTime); *cpuTimeGlobal = cpuTime; } } float computeAverageValue(float *array) { float sum = 0.0; for (int i = 0; i < sizeof(array); i++) { sum += array[i]; } return sum / sizeof(array); } int main() { // Zadanie 1 printDeviceNames(); // Zadanie 2 int id; printf("Wprowadz indeks urzadzenia, aby wyswietlic jego parametry: "); scanf("%d", &id); printDevicePropertiesById(id); // Zadanie 3 allocAndFreeMem(); // Zadanie 4, 5, 6 int nElements = 1024 * 1024; float cpuTime[10] = { 0.0 }, gpuTime[10] = { 0.0 }; for (int i = 0; i < 10; i++) { hostAndDeviceTransfer(&cpuTime[i], &gpuTime[i], nElements); } unsigned int bytes = nElements * sizeof(int); float avgCpySpeedCPU = (bytes * 1e-6) / 
computeAverageValue(cpuTime); float avgCpySpeedGPU = (bytes * 1e-6) / computeAverageValue(gpuTime); printf("\n-----------------------------------------------\n"); printf("Rozmiar kopiowanych danych: %d MB\n", bytes / (1024 * 1024)); printf("Srednia predkosc kopiowania dla CPU [GB/s]: %f\n", avgCpySpeedCPU); printf("Srednia predkosc kopiowania dla GPU [GB/s]: %f\n", avgCpySpeedGPU); return 0; }
3,705
/*** Implementation of Spatial Transformer Networks[1] Under Simplified BSD License by Che-Wei Lin [1] Max Jaderberg et al. Spatial Transformer Networks. NIPS 2015 ***/ __global__ void AffineForward(const float* bottom_data, const int* bs, const float* affine, const int len, float* top_data) { // bs = bottomSize int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) return; // get current index, [h,w,c,n] int h = index % bs[0]; int w = (index / bs[0]) % bs[1]; int c = (index / bs[0] / bs[1]) % bs[2]; int n = index / bs[0] / bs[1] / bs[2]; // get current affine start index const float* a = affine + n*6; // calc bottom index // [a0 a1 0] // [x y 1] = [u v 1] *[a2 a3 0] // [a4 a5 1] float nw = 2.0*((float)w/(float)bs[1]-0.5); //-1~1 float nh = 2.0*((float)h/(float)bs[0]-0.5); //-1~1 float w_new = ((a[0]*nw + a[2]*nh + a[4])/2.0+0.5)*(float)bs[1]; float h_new = ((a[1]*nw + a[3]*nh + a[5])/2.0+0.5)*(float)bs[0]; // calc neighbor pixel index, if > size or < size, do float v = 0.0; for (int x = floor(w_new); x <= ceil(w_new); x++) { for (int y = floor(h_new); y <= ceil(h_new); y++) { if (x < 0 || x>= bs[1] || y < 0 || y >= bs[0]){ v = 0.0; }else{ v = bottom_data[n*len/bs[3] + c*bs[1]*bs[0] + x*bs[0] + y]; } top_data[index] += v * (1-abs(w_new - (float)x)) * (1-abs(h_new - (float)y)); } } } __global__ void AffineBackward(const float* bottom_data, const int* bs, const float* affine, const int len, const float* top_data, const float* top_diff, float* bottom_diff1, float* bottom_diff2) { // bs = bottomSize int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) return; // get current index, [h,w,c,n] int h = index % bs[0]; int w = (index / bs[0]) % bs[1]; int c = (index / bs[0] / bs[1]) % bs[2]; int n = index / bs[0] / bs[1] / bs[2]; // get current affine start index const float* a = affine + n*6; // calc bottom index // [a0 a1 0] // [x y 1] = [u v 1] *[a2 a3 0] // [a4 a5 1] float nw = 2.0*((float)w/(float)bs[1]-0.5); //-1~1 float nh = 
2.0*((float)h/(float)bs[0]-0.5); //-1~1 float w_new = ((a[0]*nw + a[2]*nh + a[4])/2.0+0.5)*(float)bs[1]; float h_new = ((a[1]*nw + a[3]*nh + a[5])/2.0+0.5)*(float)bs[0]; float u = 0.0; float dx = 0.0; float dy = 0.0; for (int x = max(floor(w_new),0.0); x <= min(ceil(w_new),(float)bs[1]); x++) { for (int y = max(floor(h_new),0.0); y <= min(ceil(h_new),(float)bs[0]); y++) { u = bottom_data[n*bs[2]*bs[1]*bs[0] + c*bs[1]*bs[0] + x*bs[0] + y]; atomicAdd(bottom_diff1 + (n*bs[2]*bs[1]*bs[0] + c*bs[1]*bs[0] + x*bs[0] + y), top_diff[index] * (1-abs(w_new - (float)x)) * (1-abs(h_new - (float)y)) ); dx += u * (1-abs(h_new - (float)y)) * ((float)x >= w_new ? 1.0:-1.0 ); dy += u * (1-abs(w_new - (float)x)) * ((float)y >= h_new ? 1.0:-1.0 ); } } atomicAdd((bottom_diff2+n*6)+0, nw *dx*top_diff[index]); atomicAdd((bottom_diff2+n*6)+2, nh *dx*top_diff[index]); atomicAdd((bottom_diff2+n*6)+4, 1.0*dx*top_diff[index]); atomicAdd((bottom_diff2+n*6)+1, nw *dy*top_diff[index]); atomicAdd((bottom_diff2+n*6)+3, nh *dy*top_diff[index]); atomicAdd((bottom_diff2+n*6)+5, 1.0*dy*top_diff[index]); }
3,706
#include "includes.h"

// Flatten 2D pixel coordinates into a row-major linear offset.
__device__ int index(int x, int y, int width)
{
    return (y * width) + x;
}

// Square filter window: 9x9 neighborhood, half-extent 4.
__device__ const int FILTER_SIZE = 9;
__device__ const int FILTER_HALFSIZE = FILTER_SIZE >> 1;

// In-place ascending bubble sort of the first n_size entries of x.
__device__ void sort_bubble(float *x, int n_size)
{
    for (int i = 0; i < n_size - 1; i++) {
        for (int j = 0; j < n_size - i - 1; j++) {
            if (x[j] > x[j + 1]) {
                float temp = x[j];
                x[j] = x[j + 1];
                x[j + 1] = temp;
            }
        }
    }
}

// One thread per output pixel: gather the in-bounds members of the pixel's
// 9x9 neighborhood, sort them, and write the median of the gathered values.
// Border pixels use a shrunken (clipped) window.
__global__ void median_filter_2d(unsigned char* input, unsigned char* output, int width, int height)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height)
        return;

    const int out_idx = index(x, y, width);
    float window[MAX_WINDOW * MAX_WINDOW];
    int count = 0;

    for (int xi = x - FILTER_HALFSIZE; xi <= x + FILTER_HALFSIZE; xi++) {
        for (int yi = y - FILTER_HALFSIZE; yi <= y + FILTER_HALFSIZE; yi++) {
            const bool inside = (0 <= xi && xi < width) &&
                                (0 <= yi && yi < height);
            if (inside) {
                window[count++] = input[index(xi, yi, width)];
            }
        }
    }

    sort_bubble(window, count);
    output[out_idx] = window[count / 2];
}
3,707
#include "includes.h"

// Accumulates an L2 regularization term, 0.5 * lambda * v[i]^2 summed over
// the vector, into *d_rv. Only thread 0 of the block does the work (a
// serial, single-thread reduction).
// NOTE(review): the sum starts at i = 1, skipping element 0 -- presumably a
// bias/intercept term deliberately excluded from regularization; confirm
// with the caller.
__global__ void kernel_add_regularization_term(double * d_input_vector, int dimension, double regularization_parameter, double * d_rv)
{
    if (threadIdx.x != 0)
        return;

    double acc = 0;
    for (int i = 1; i < dimension; ++i) {
        acc += 0.5 * d_input_vector[i] * d_input_vector[i] * regularization_parameter;
    }
    *d_rv += acc;
}
3,708
#include "includes.h"

// Returns the largest of three integers.
__device__ __host__ int maximum( int a, int b, int c)
{
    int m = (a <= b) ? b : a;
    return (m <= c) ? c : m;
}

// Updates one anti-diagonal of the score matrix (the sweep below the main
// diagonal): each thread owns one cell (r, c) offset by i+1, and only cells
// satisfying r == max_cols - c + i (the current diagonal) are written.
// Each cell takes the maximum of the diagonal move plus reference score and
// the two penalty moves -- NOTE(review): this matches a Needleman-Wunsch
// style recurrence; confirm against the companion host code.
__global__ void lower_right(int *dst, int *input_itemsets, int *reference, int max_rows, int max_cols, int i, int penalty)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y + i + 1;
    const int c = blockIdx.x * blockDim.x + threadIdx.x + i + 1;
    if (r >= max_rows || c >= max_cols)
        return;

    if (r == (max_cols - c + i)) {
        const int diag = input_itemsets[(r - 1) * max_cols + c - 1] + reference[r * max_cols + c];
        const int left = input_itemsets[r * max_cols + c - 1] - penalty;
        const int up   = input_itemsets[(r - 1) * max_cols + c] - penalty;
        dst[r * max_cols + c] = maximum(diag, left, up);
    }
}
3,709
#include "includes.h"

// Element-wise sum over a flat buffer of x*y ints: oData[i] = fData[i] + sData[i].
// Uses a grid-stride loop, so any launch configuration covers all x*y elements.
__global__ void add(int *fData, int *sData, int *oData, int x, int y)
{
    const int total = x * y;
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < total; i += step) {
        oData[i] = fData[i] + sData[i];
    }
}
3,710
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>
#include <curand.h>
#include <sys/time.h>

#define SEED 921
#define TPB 256
#define NUM_ITER 100000000
#define NUM_THREADS 10000
#define NUM_ITER_THREADS (NUM_ITER/NUM_THREADS)

/* Wall-clock time in seconds. */
double cpuSecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}

/*
 * Monte Carlo estimation of pi: every thread draws NUM_ITER_THREADS uniform
 * points in the unit square and counts hits inside the quarter circle.
 * Per-thread counts are kept in shared memory, reduced by thread 0, and
 * folded into *d_res with one atomicAdd per block.
 *
 * d_res must be zero-initialized by the caller.
 */
__global__ void count_nom(int *d_res, curandState *states)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    const int s_idx = threadIdx.x;
    __shared__ int s_prod[TPB];

    /* Each thread zeroes only its own slot; the barrier makes the
     * initialization visible block-wide. (The previous version had every
     * thread zero the entire array with no barrier, racing with the
     * counting loop and losing increments.) */
    s_prod[s_idx] = 0;
    __syncthreads();

    /* Threads beyond NUM_THREADS must not return early: every thread of the
     * block has to reach the __syncthreads() below, so out-of-range threads
     * simply skip the sampling loop instead of exiting. */
    if (idx < NUM_THREADS) {
        curand_init(idx, idx, 0, &states[idx]); /* different seed per thread */
        for (int iter = 0; iter < NUM_ITER_THREADS; iter++) {
            double x = curand_uniform(&states[idx]);
            double y = curand_uniform(&states[idx]);
            double z = sqrt((x * x) + (y * y));
            if (z <= 1.0) {
                s_prod[s_idx]++;
            }
        }
    }
    __syncthreads();

    /* Thread 0 reduces the block and issues a single atomic per block. */
    if (s_idx == 0) {
        int blockSum = 0;
        for (int j = 0; j < blockDim.x; ++j) {
            blockSum += s_prod[j];
        }
        atomicAdd(d_res, blockSum);
    }
}

int main(int argc, char *argv[])
{
    double pi;
    double start_time, stop_time, diference;
    int grid = (NUM_THREADS + TPB - 1) / TPB;
    int *d_res;
    int *count = (int *)malloc(sizeof(int));

    cudaMalloc(&d_res, sizeof(int));
    /* cudaMalloc does not zero memory; without this memset the kernel's
     * atomicAdd accumulates onto garbage. */
    cudaMemset(d_res, 0, sizeof(int));

    srand(SEED); // Important: Multiply SEED by "rank" when you introduce MPI!
    curandState *dev_random;
    cudaMalloc((void **)&dev_random, grid * TPB * sizeof(curandState));

    /* Calculate PI following a Monte Carlo method. */
    start_time = cpuSecond();
    count_nom<<<grid, TPB>>>(d_res, dev_random);
    cudaDeviceSynchronize();
    cudaMemcpy(count, d_res, sizeof(int), cudaMemcpyDeviceToHost);
    stop_time = cpuSecond();
    diference = stop_time - start_time;

    /* Estimate pi and display the result. */
    pi = ((double)count[0] / (double)(NUM_ITER_THREADS * NUM_THREADS)) * 4.0;
    printf("The result is %f\n", pi);
    printf("The execution time is %f\n", diference);

    cudaFree(d_res);
    cudaFree(dev_random);
    free(count);
    return 0;
}
3,711
#include <stdio.h> #include <stdlib.h> #include <math.h> #define BLOCK_SIZE 16 #define MAX(i,j) ( (i)<(j) ? (j):(i) ) #define MIN(i,j) ( (i)<(j) ? (i):(j) ) #define SubArrayA(x,y) subArrayA[(x)*BLOCK_SIZE+(y)] #define InputArrayA(x,y) inputArrayA[(x)*BLOCK_SIZE+(y)] #define ImageOut(x,y) imageOut[(x)*imageSize+(y)] __device__ float gaussianDistance(float *inputArrayA,int xj,int yj,int halfPatchSWidth); __device__ float weightingFunct(float *inputArrayA,int xj,int yj,int halfPatchSWidth,float sigma,float Zi); __device__ float normFactor(float *inputArrayA,int halfPatchSWidth,float sigma); __device__ float nonLocalMeans(float *inputArrayA,float *imageOut,int halfPatchSWidth,int imageSize,float sigma); __global__ void mainGpuFunction(float const * const inputArray,float *imageOut,int halfPatchSWidth,int imageSize,float sigma) { //Πάρε τις συντεταγμένες του αντίστοιχου pixel που κάνουμε αποθορυβοποίηση int xi = blockIdx.x * blockDim.x + threadIdx.x; int yi = blockIdx.y * blockDim.y + threadIdx.y; //Πρέπει να είμαι μέσα στα όρια της εικόνας if((xi<imageSize)&&(yi<imageSize)){ __shared__ float subArrayA[BLOCK_SIZE*BLOCK_SIZE]; //Κάθε ένα απο τα thread θα φορτώσει μία τιμή του πίνακα τιμών των pixel //απο την global μνήμη στην shared μνημη SubArrayA(threadIdx.x,threadIdx.y)=inputArray[xi*imageSize+yi]; __syncthreads(); //Το κάθε νήμα εφαρμόζει τον αλγόριθμο για το τμήμα της εικόνας που φόρτωσε στην μνήμη shared ImageOut(xi,yi)=nonLocalMeans(subArrayA,imageOut,halfPatchSWidth,imageSize,sigma); }//Τέλος if } __device__ float nonLocalMeans(float *inputArrayA,float *imageOut,int halfPatchSWidth,int imageSize,float sigma){ float ww=0; float Zi=normFactor(inputArrayA,halfPatchSWidth,sigma); //Υπολογίζουμε την τιμή Z(i) //Τα αθροίσματα w(i,j)*f(j) for(int xj=0;xj<BLOCK_SIZE;xj++) { for(int yj=0;yj<BLOCK_SIZE;yj++) { ww+=weightingFunct(inputArrayA,xj,yj,halfPatchSWidth,sigma,Zi)*InputArrayA(xj,yj); //w(i,j)*f(j) } } return(ww); } //Η μεταβλητή w(i,j)=w([xi,yi] [xj,yj]) __device__ 
float weightingFunct(float *inputArrayA,int xj,int yj,int halfPatchSWidth,float sigma,float Zi){ float distance=gaussianDistance(inputArrayA,xj,yj,halfPatchSWidth); return ( ( exp(-(distance/(sigma*sigma))) )/Zi); } //Η μεταβλητη Z(i)=Z(xi,yi) __device__ float normFactor(float *inputArrayA,int halfPatchSWidth,float sigma){ float square_sigma=sigma*sigma; float z=0; for(int i=0;i<BLOCK_SIZE;i++) { for(int j=0;j<BLOCK_SIZE;j++) { float distance=gaussianDistance(inputArrayA,i,j,halfPatchSWidth); z+=exp(-(distance/square_sigma) ); } } return (z); } //Υπολογισμός της διαφοράς |f(Ni)-f(Nj)| //Χρησιμοποιούμε Gaussian Euclidean Distance __device__ float gaussianDistance(float *inputArrayA,int xj,int yj,int halfPatchSWidth){ int xi=threadIdx.x; int yi=threadIdx.y; //Ο δείκτης i αναφέρεται στο pixel του οποίο υπολογίζουμε την νέα τιμή //ενώ ο δείκτης j στα υπόλοιπα pixel με τα οποία γίνεται σύγκριση int ai; int bi; int aj; int bj; int SumWeight=0; //Άθροισμα βαρών float distance=0;//Συνολική διαφορά γειτονιάς pixel float diff=0; //Διαφορά μεταξύ 2 pixel γειτόνων for(int i=-halfPatchSWidth;i<=halfPatchSWidth;i++) { for(int j=-halfPatchSWidth;j<=halfPatchSWidth;j++) { ai=xi+i; bi=yi+j; aj=xj+i; bj=yj+j; if((aj<0)||(aj>=BLOCK_SIZE)) aj=xj-i; if((bj<0)||(bj>=BLOCK_SIZE)) bj=yj-j; if((ai<0)||(ai>=BLOCK_SIZE)) ai=xi-i; if((bi<0)||(bi>=BLOCK_SIZE)) bi=yi-j; if (ai!=xi || bi!=yi)//Κάνε σύγκριση για όλη την γειτονιά εκτώς του κεντρικού { int weight=1/(MAX(ai-xi,xi-ai)+MAX(bi-yi,yi-bi)); SumWeight+=weight; diff=InputArrayA(ai,bi)-InputArrayA(aj,bj); distance+=diff*diff*weight; } } } return (distance/SumWeight); }
3,712
#include "includes.h"

// Writes output[i] = vector[i]^2 for every i in [0, len); one thread per element.
__global__ void square_i32 (int* vector, int* output, int len)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    const int v = vector[i];
    output[i] = v * v;
}
3,713
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda.h>
#include <curand_kernel.h>

using namespace std;

// One curand state per thread. Each thread gets a different seed with
// sequence number 0 (fast initialization; the alternative below uses one
// seed with per-thread sequence numbers, which is statistically safer but
// slower to initialize).
__global__ void setup_kernel(curandState *state, int init)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(id + init, 0, 0, &state[id]);
    // Each thread gets same seed, different sequence number:
    // curand_init(init, id, 0, &state[id]);
}

// Each thread takes one uniform (0,1] step; the block sums its steps with a
// shared-memory tree reduction (requires blockDim.x to be a power of two)
// and writes one partial sum per block into result[blockIdx.x].
__global__ void walk(curandState *state, double *result)
{
    extern __shared__ double smem[];
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    int id = threadIdx.x;

    smem[id] = curand_uniform_double(&state[gid]);
    __syncthreads();

    // Tree reduction in shared memory.
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (id < s) {
            smem[id] += smem[id + s];
        }
        __syncthreads();
    }

    // Copy the block's result to the per-block result array.
    if (id == 0) result[blockIdx.x] = smem[id];
}

int main(int argc, char *argv[])
{
    // Timing variables.
    struct timeval tt1, tt2, tt3, tt4;
    int ms;
    double fms;

    // Number of steps.
    int n = 1048576;

    // Runtime configuration: nThreads must stay a power of two for the
    // reduction in walk().
    int nThreads = 1024;
    int nBlocks = n / nThreads;

    // Data and result arrays.
    double *devResult, *hostResult;
    hostResult = (double *)calloc(nBlocks, sizeof(double));
    cudaMalloc((void **)&devResult, nBlocks * sizeof(double));

    // Space for curand states on the device.
    curandState *devStates;
    cudaMalloc((void **)&devStates, nBlocks * nThreads * sizeof(curandState));

    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent.
    cudaDeviceSynchronize();
    gettimeofday(&tt1, NULL);

    // Seed the generators.
    setup_kernel<<<nBlocks, nThreads>>>(devStates, time(NULL));
    cudaDeviceSynchronize();
    gettimeofday(&tt2, NULL);

    // Random walk kernel.
    walk<<<nBlocks, nThreads, nThreads * sizeof(double)>>>(devStates, devResult);
    cudaDeviceSynchronize();
    gettimeofday(&tt3, NULL);

    // Transfer per-block partial sums from the device.
    cudaMemcpy(hostResult, devResult, nBlocks * sizeof(double),
               cudaMemcpyDeviceToHost);

    // Final summation of block results on the host.
    double total = 0.0;
    for (int i = 0; i < nBlocks; i++) {
        total += hostResult[i];
    }
    cudaDeviceSynchronize();
    gettimeofday(&tt4, NULL);

    // Time calculation.
    ms = (tt2.tv_sec - tt1.tv_sec);
    ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
    fms = ((double)ms) / 1000000.0;
    cout << "Initialization time = " << fms << endl;

    ms = (tt3.tv_sec - tt2.tv_sec);
    ms = ms * 1000000 + (tt3.tv_usec - tt2.tv_usec);
    fms = ((double)ms) / 1000000.0;
    cout << "Random walk time = " << fms << endl;

    ms = (tt4.tv_sec - tt1.tv_sec);
    ms = ms * 1000000 + (tt4.tv_usec - tt1.tv_usec);
    fms = ((double)ms) / 1000000.0;
    cout << "Total time = " << fms << endl;

    // Screen output of the result.
    cout << "Total distance = " << setprecision(9) << total << " in " << n << " steps." << endl;
    cout << "Expected distance = " << n / 2 << endl;

    // Cleanup.
    cudaFree(devResult);
    cudaFree(devStates);
    free(hostResult);
}
3,714
/*********************************************************************** ! Version 1: 04/16 SCR * ! * ! CUDA/C optimization of: * ! * ! SNSWP3D - This routine calculates angular fluxes for a single * ! direction for an upstream corner-balance spatial * ! discretization in 3D. * ! * ! Input: * ! * ! Output: * ! * ! Questions: * ! 1. Should the hyperplane pass over zones or corners? * ! A: Corners * ! 2. * ! * !**********************************************************************/ /* TODO: explicit setting of #hyperplanes */ #include<stdio.h> //#include<omp.h> //#include"nvToolsExt.h" #define GROUPSET 16 #define NUMFACES 3 #define NUMSM 90 #define fouralpha 1.82 #define fouralpha4 5.82 //#include "snutil.cu" //#include "snsweep.cu" #define Connect(a,b,c) Connect[ a + 3 * ( b + mC * c ) ] extern "C" { void callCudaHostMalloc (); // void CC_sweep( __global__ void GPU_sweep( int size_maxCorner, int size_maxcf, int nAngle, int nzones, int ncornr, int Groups, int nbelem, int* Angle, double* soa_omega, int* nextZ, int* next, int* soa_nCorner, int* soa_nCFaces, int* soa_c0, double* soa_STotal, double* soa_STime, double* soa_SigtInv, double* soa_Volume, double* soa_Sigt, double* soa_A_fp, double* soa_A_ez, int* soa_Connect, double* psic, double* psib ); /* Simple C file to handle calling CUDA. Easily accessible from Fortran. This removes any need to compile any other part of the program with nvcc. Just this file needs nvcc. Everything else just uses regular intel compilers. Simple. 
*/ void callcudasub ( int *numzones, int *numgroups, int *ncornr, int *numAngles, int *AngleOrder, int *maxcorners, int *maxfaces, int *octant, //=binRecv int *NangBin, int *nbelem, double *omega, int *nCorner, int *nCFaces, int *c0, double *A_fp, double *A_ez, int *Connect, double *STotal, double *STime, double *Volume, double *psic, double *psib, int *next, int *nextZ, double *Sigt, double *SigtInv ) { //dump data to check static int dump_cnt=0; int zone,ic; static int* d_AngleOrder; static double* d_omega; static int* d_nextZ; static int* d_next; static int* d_nCorner; static int* d_nCFaces; static int* d_c0; static double* d_STotal; static double* d_STime; static double* d_SigtInv; static double* d_Volume; static double* d_Sigt; static double* d_A_fp; static double* d_A_ez; static int* d_Connect; static double* d_psic; static double* d_psib; int nZ = *numzones; int nA = *numAngles; int mC = *maxcorners; int mF = *maxfaces; int nG = *numgroups; int nC = *ncornr; int nBe = *nbelem; if ( dump_cnt < 5 ) { // for(zone=0;zone<nZ;zone++) // { // for(ic=0;ic<nCorner[zone]; ic++) // { // printf(" zone,corner,connect3 = %d,%d,%d \n",zone,ic,Connect(2,ic,zone) ); // } // } printf("max faces=%d\n",mF); if ( dump_cnt == 0 ) { cudaMalloc(&d_AngleOrder,sizeof(int)*8*nA); cudaMalloc(&d_omega,sizeof(double)*3*nA); cudaMalloc(&d_nextZ,sizeof(int)*nZ*nA); cudaMalloc(&d_next,sizeof(int)*(nC+1)*nA); cudaMalloc(&d_nCorner,sizeof(int)*nZ); cudaMalloc(&d_nCFaces,sizeof(int)*nZ); cudaMalloc(&d_c0,sizeof(int)*nZ); cudaMalloc(&d_STotal,sizeof(double)*nZ*nG*mC); cudaMalloc(&d_STime,sizeof(double)*nZ*nA*nG*mC); cudaMalloc(&d_SigtInv,sizeof(double)*nZ*nG); cudaMalloc(&d_Volume,sizeof(double)*nZ*mC); cudaMalloc(&d_Sigt,sizeof(double)*nZ*nG); cudaMalloc(&d_A_fp,sizeof(double)*3*nZ*mC*mF); cudaMalloc(&d_A_ez,sizeof(double)*3*nZ*mC*mF); cudaMalloc(&d_Connect,sizeof(int)*3*nZ*mC*mF); cudaMalloc(&d_psic,sizeof(double)*nG*nC*nA); cudaMalloc(&d_psib,sizeof(double)*nG*nBe*nA); } 
cudaMemcpy(d_AngleOrder,AngleOrder,sizeof(int)*8*nA,cudaMemcpyHostToDevice); cudaMemcpy(d_omega,omega,sizeof(double)*3*nA,cudaMemcpyHostToDevice); cudaMemcpy(d_nextZ,nextZ,sizeof(int)*nZ*nA,cudaMemcpyHostToDevice); cudaMemcpy(d_next,next,sizeof(int)*(nC+1)*nA,cudaMemcpyHostToDevice); cudaMemcpy(d_nCorner,nCorner,sizeof(int)*nZ,cudaMemcpyHostToDevice); cudaMemcpy(d_nCFaces,nCFaces,sizeof(int)*nZ,cudaMemcpyHostToDevice); cudaMemcpy(d_c0,c0,sizeof(int)*nZ,cudaMemcpyHostToDevice); cudaMemcpy(d_STotal,STotal,sizeof(double)*nZ*nG*mC,cudaMemcpyHostToDevice); cudaMemcpy(d_STime,STime,sizeof(double)*nZ*nA*nG*mC,cudaMemcpyHostToDevice); cudaMemcpy(d_SigtInv,SigtInv,sizeof(double)*nZ*nG,cudaMemcpyHostToDevice); cudaMemcpy(d_Volume,Volume,sizeof(double)*nZ*mC,cudaMemcpyHostToDevice); cudaMemcpy(d_Sigt,Sigt,sizeof(double)*nZ*nG,cudaMemcpyHostToDevice); cudaMemcpy(d_A_fp,A_fp,sizeof(double)*3*nZ*mC*mF,cudaMemcpyHostToDevice); cudaMemcpy(d_A_ez,A_ez,sizeof(double)*3*nZ*mC*mF,cudaMemcpyHostToDevice); cudaMemcpy(d_Connect,Connect,sizeof(int)*3*nZ*mC*mF,cudaMemcpyHostToDevice); cudaMemcpy(d_psib,psib,sizeof(double)*nG*nBe*nA,cudaMemcpyHostToDevice); cudaMemcpy(d_psic,psic,sizeof(double)*nG*nC*nA,cudaMemcpyHostToDevice); // // GPU_sweep<<<nA,32>>>( mC, mF, // nA, nZ, nC, nG, nBe, d_AngleOrder, d_omega, d_nextZ, d_next, d_nCorner, d_nCFaces, d_c0, d_STotal, d_STime, d_SigtInv, d_Volume, d_Sigt, d_A_fp, d_A_ez, d_Connect, d_psic, d_psib ); cudaMemcpy(psic,d_psic,sizeof(double)*nG*nC*nA,cudaMemcpyDeviceToHost); // CC_sweep( // *maxcorners, // mF, //*maxfaces, // *numAngles, // *numzones, // nC, // *numgroups, // nBe, // AngleOrder, // omega, // nextZ, // next, // nCorner, // nCFaces, // c0, // STotal, // STime, // SigtInv, // Volume, // Sigt, // A_fp, // A_ez, // Connect, // psic, // psib // ); dump_cnt++; } } //callcuasub } // extern C
3,715
#include <stdio.h> __global__ void testFunc() { //int x = blockIdx.x*blockDim.x + threadIdx.x; return; } __global__ void modifyMeshGPU(float a_fpTime, float* a_pfpMesh) { int x = blockIdx.x*blockDim.x + threadIdx.x; if(x<9) { if(0 == x%3) { a_pfpMesh[x] = a_pfpMesh[x] + 0.5f * cos(a_fpTime); } } return; } extern "C" void cuda_testFunc() { dim3 dimBlock = dim3(16, 16); dim3 dimGrid = dim3(1, 1); testFunc<<<dimBlock,dimGrid>>>(); printf("cuda_testFunc"); return; } extern "C" void cuda_modifyMesh(float a_fpTime, float* a_pfpMesh) { dim3 dimBlock = dim3(16, 16); dim3 dimGrid = dim3(1, 1); modifyMeshGPU<<<dimBlock,dimGrid>>>(a_fpTime, a_pfpMesh); return; }
3,716
#include "includes.h" __global__ void VecAdd() { }
3,717
#include <stdio.h> #include <cuda.h> __global__ void dkernel() { printf("Hello World!"); } int main() { dkernel<<<1, 1>>>(); cudaDeviceSynchronize(); return 0; }
3,718
#include<iostream>
#include<fstream>
#include<math.h>
#include<stdlib.h>
#include<curand_kernel.h>
#include<curand.h>
#include<time.h>

/*
 * Ant Colony Optimization for the Travelling Salesman Problem on the GPU.
 * Cities are read from a file; ants construct tours in parallel kernels and
 * pheromone trails are evaporated/reinforced each iteration.
 */
#define MAX_CITIES 29   // capacity of the statically sized host arrays
#define MAX_ANTS 14     // number of ants processed by endTour
#define Q 80            // pheromone deposit numerator (deposit = Q / tour length)
#define ALPHA 0.5       // pheromone-trail weight exponent
#define BETA 0.8        // visibility (1/distance) weight exponent
#define RHO 0.5         // evaporation rate

using namespace std;

int n=0;    // actual number of cities read from input (assumed <= MAX_CITIES)
int NC = 0; // iteration counter
int t = 0;  // cumulative ant count (bookkeeping only)

// 2-D integer coordinates of one city.
struct cities
{
  int x,y;
};

int s;

// Per-ant tour state.
struct ANTS{
  int curCity, nextCity;    // current city and the city chosen next
  int visited[MAX_CITIES];  // 0/1 flags for cities already toured
  int tour[MAX_CITIES];     // permutation built so far
  float L;                  // accumulated tour length
};

cities city[MAX_CITIES];
float pheromone[MAX_CITIES][MAX_CITIES];
float dist[MAX_CITIES][MAX_CITIES];
ANTS ant[MAX_ANTS];
int best=9999999;   // best (shortest) tour length found so far (kept as int)
int bestIndex;      // index of the ant that produced `best`
float Delta_Pheromones[MAX_CITIES][MAX_CITIES];
float numerator[MAX_CITIES][MAX_CITIES];
curandState state[MAX_ANTS];

// One thread per (row,col): zero the distance/delta matrices, seed pheromone
// at 1/n, and fill in the symmetric Euclidean city distances (n-stride).
__global__ void initialize(float *d_dist,float *d_pheromone,float *d_Delta_Pheromones,cities *d_city,int n)
{
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if((row<n)&&(col<n)){
    d_dist[col + row * n] = 0.0f;
    d_pheromone[col + row * n] = 1.0f / n;
    d_Delta_Pheromones[col + row * n] = 0.0f;
    if(row!=col)
    {
      d_dist[col + row * n]=sqrt(powf(abs(d_city[row].x-d_city[col].x),2)+powf(abs(d_city[row].y-d_city[col].y),2));
    }
  }
}

// Give every thread its own curand generator, all seeded from `t`.
__global__ void setup_curand_states(curandState *state_d,int t){
  int id = threadIdx.x + blockIdx.x*blockDim.x;
  curand_init(t, id, 0, &state_d[id]);
}

// Draw one uniform float from generator `ind`, persisting its state.
__device__ float generate(curandState* globalState, int ind){
  //int ind = threadIdx.x;
  curandState localState = globalState[ind];
  float RANDOM = curand_uniform( &localState );
  globalState[ind] = localState;
  return RANDOM;
}

// One thread per ant: start ant `id` at city `id` with an empty tour.
__global__ void initializeTour(ANTS *d_ant,int n){
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if(id<n){
    int j = id;
    d_ant[id].curCity = j;
    for(int i=0;i<n;i++)
    {
      d_ant[id].visited[i]=0;
    }
    d_ant[id].visited[j] = 1;
    d_ant[id].tour[0] = j;
    d_ant[id].L = 0.0;
  }
}

// numerator(i,j) = tau(i,j)^ALPHA * (1/dist(i,j))^BETA — the unnormalized
// transition attractiveness consumed by nextCity.
__global__ void PHI_numerator(float *d_numerator, float *d_dist, float *pheromone, int n){
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  if(row < n && col < n){
    int id = row * n + col;
    d_numerator[id] = powf( pheromone[id], ALPHA) * powf( (1.0/ d_dist[id]), BETA);
  }
}

// Roulette-wheel selection of ant k's next city among the unvisited ones:
// cycle through candidates, accepting city j with probability
// numerator(i,j)/sum.  After the first for-loop j == n, so the while-loop
// wraps it to 0 before the first test.
// NOTE(review): the RNG is indexed by the *current city* i, not the ant id,
// so ants sitting in the same city share a generator stream — confirm this
// is intended.
__device__ int nextCity(int k,int n,float *d_numerator,ANTS *d_ant,curandState *state_d)
{
  int i = d_ant[k].curCity;
  int j;
  double sum=0.0;
  // Total attractiveness over the not-yet-visited cities (normalizer).
  for(j=0;j<n;j++)
  {
    if(d_ant[k].visited[j]==0)
    {
      sum+= d_numerator[i*n+j];
    }
  }
  while(1)
  {
    j++;
    if(j >= n) j=0;
    if(d_ant[k].visited[j] == 0)
    {
      float probability = d_numerator[i*n+j]/sum;
      float random = (float)generate(state_d,i);
      if(random < probability)
      {
        break;
      }
    }
  }
  return j;
}

// One thread per ant: complete the remaining n-1 steps of its tour,
// accumulating the travelled distance in L.
__global__ void tourConstruction(ANTS *d_ant, float *d_dist, float *d_numerator,int n,curandState *state_d)
{
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if(id < n){
    for(int s=1;s<n;s++)
    {
      int j = nextCity(id, n, d_numerator,d_ant,state_d);
      d_ant[id].nextCity = j;
      d_ant[id].visited[j]=1;
      d_ant[id].tour[s] = j;
      d_ant[id].L+=d_dist[d_ant[id].curCity * n + j];
      d_ant[id].curCity = j;
    }
  }
}

// Close each tour back to its start city, race-free update of the global
// best length via atomicMin, then deposit Q/L pheromone along the tour.
// NOTE(review): ant[k].L is a float passed to the int overload of atomicMin
// (truncated), and `best` is an int throughout — confirm the quantization is
// acceptable.  Also, this kernel indexes dist with a MAX_CITIES stride while
// the device matrix was filled with an n stride; the two only agree when
// n == MAX_CITIES — verify.
__global__ void endTour(float *Delta_Pheromones, ANTS *ant,float *dist, int *best, int *bestIndex){
  int k = threadIdx.x + blockIdx.x * blockDim.x;
  if(k < MAX_ANTS){
    ant[k].L += dist[ant[k].curCity * MAX_CITIES + ant[k].tour[0]];
    ant[k].curCity = ant[k].tour[0];
    int temp = *best;
    printf("This is before atomicMin %d\n", *best);
    atomicMin(best, ant[k].L);
    printf("This is after atomicMin %d\n", *best);
    if (*best!= temp){
      *bestIndex = k;
    }
    for(int i = 0; i < MAX_CITIES;i++){
      int first = ant[k].tour[i];
      int second = ant[k].tour[(i + 1) % MAX_CITIES];
      Delta_Pheromones[first * MAX_CITIES + second] += Q/ant[k].L;
    }
  }
}

// Row-per-thread evaporation (tau *= 1-RHO, floored at 1/n) plus deposit of
// the accumulated deltas; deltas are zeroed for the next iteration.
__global__ void updatePheromone(float *d_pheromone, float *d_Delta_Pheromones, int n){
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if(id < n){
    for(int s=0;s<n;s++){
      if(id!=s)
      {
        d_pheromone[id*n+s] *=( 1.0 - RHO);
        if(d_pheromone[id*n+s]<0.0)
        {
          d_pheromone[id*n+s] = (1.0/n);
        }
      }
      d_pheromone[id*n+s] += d_Delta_Pheromones[id*n+s];
      d_Delta_Pheromones[id*n+s] = 0;
    }
  }
}

// Clear each ant's tour/visited arrays for the next iteration.
// NOTE(review): d_Delta_Pheromones is accepted but never used here.
__global__ void emptyTour(ANTS *d_ant,float *d_Delta_Pheromones,int n){
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if(id < n){
    for(int s=0;s<n;s++){
      d_ant[id].tour[s] = 0;
      d_ant[id].visited[s] = 0;
    }
  }
}

// Host driver: read the city file, allocate/seed device state, iterate the
// construct-deposit-update cycle MAX_TIME times, then print the best tour.
int main(int argc, char *argv[])
{
  clock_t start = clock();
  if (argc > 1){
    cout << "Accessing file "<< argv[1]<<endl;
  }
  else{
    cout << "Input File Name!" <<endl;
    return 1;
  }
  ifstream in;
  in.open(argv[1]);
  in>>n;
  cout<<n<<endl;
  int num;
  // File format per city: index, x, y.
  for(int i=0;i<n;i++)
  {
    in>>num;
    in>>city[i].x;
    in>>city[i].y;
    cout<<city[i].x<<" "<<city[i].y<<" "<<endl;
  }
  dim3 blockDim(32, 32, 1);
  dim3 gridDim((n - 1)/ 32 + 1, (n - 1)/ 32 + 1, 1 );
  float *d_dist,*d_pheromone,*d_Delta_Pheromones,*d_numerator;
  ANTS *d_ant;
  cities *d_city;
  curandState *state_d;
  int *d_best, *d_bestIndex;
  cudaMalloc((void**)&d_pheromone, sizeof(float) * n * n);
  cudaMalloc((void**)&d_dist, sizeof(float) * n * n);
  cudaMalloc((void**)&d_Delta_Pheromones, sizeof(float) * n * n);
  cudaMalloc((void**)&d_ant, sizeof(ANTS) * n);
  cudaMalloc((void**)&d_city, sizeof(cities) * n);
  cudaMalloc((void**)&d_numerator, sizeof(float) * n *n);
  // NOTE(review): sizeof(state) is MAX_ANTS generators (the host array), yet
  // setup_curand_states is launched with ~n threads — verify n <= MAX_ANTS.
  cudaMalloc( (void**) &state_d, sizeof(state));
  cudaMalloc((void **)&d_best, sizeof(int));
  cudaMalloc((void **)&d_bestIndex, sizeof(int));
  cudaMemcpy(d_city,city,sizeof(cities) * n,cudaMemcpyHostToDevice);
  srand(time(0));
  cudaMemcpy(d_best, &best, sizeof(int), cudaMemcpyHostToDevice);
  int seed = rand();
  setup_curand_states <<< (n-1)/32+1,32 >>> (state_d,seed);
  initialize<<<gridDim, blockDim>>>(d_dist,d_pheromone,d_Delta_Pheromones,d_city,n);
  // NOTE(review): the device matrices are n-stride but the host arrays are
  // [MAX_CITIES][MAX_CITIES]; these flat copies only line up row-for-row
  // when n == MAX_CITIES.
  cudaMemcpy(dist,d_dist,sizeof(float) * n * n,cudaMemcpyDeviceToHost);
  cudaMemcpy(pheromone,d_pheromone,sizeof(float) * n * n,cudaMemcpyDeviceToHost);
  cudaMemcpy(Delta_Pheromones,d_Delta_Pheromones,sizeof(float) * n * n,cudaMemcpyDeviceToHost);
  int MAX_TIME = 20;
  for(;;)
  {
    initializeTour<<<(n-1)/32+1,32>>>(d_ant,n);
    cudaThreadSynchronize();
    PHI_numerator<<< gridDim, blockDim>>>(d_numerator, d_dist, d_pheromone, n);
    cudaThreadSynchronize();
    tourConstruction<<<(n-1)/32+1,32>>>(d_ant,d_dist,d_numerator,n,state_d);
    cudaThreadSynchronize();
    cudaMemcpy(ant,d_ant,sizeof(ANTS) * n,cudaMemcpyDeviceToHost);
    endTour<<<(n - 1)/32 + 1, 32>>>(d_Delta_Pheromones, d_ant, d_dist, d_best, d_bestIndex);
    updatePheromone<<< (n-1)/32+1,32>>>(d_pheromone,d_Delta_Pheromones,n);
    cudaThreadSynchronize();
    t += MAX_ANTS;
    NC += 1;
    if(NC < MAX_TIME){
      emptyTour<<<(n-1)/32+1,32>>>(d_ant,d_Delta_Pheromones,n);
      cudaMemcpy(&best, d_best, sizeof(int), cudaMemcpyDeviceToHost);
      cout<<"Best so far = "<<best<<endl;
      cudaThreadSynchronize();
    }
    else{
      break;
    }
  }
  cout<<endl;
  cudaMemcpy(&best, d_best, sizeof(int), cudaMemcpyDeviceToHost);
  cudaMemcpy(&bestIndex, d_bestIndex, sizeof(int), cudaMemcpyDeviceToHost);
  // Print the winning ant's tour (host copy taken before the final endTour).
  for(int i=0;i<n;i++)
  {
    cout<<ant[bestIndex].tour[i]<<" ";
  }
  cout<<endl;
  cout<<"\n Best tour = "<<best<<endl<<endl<<endl;
  clock_t last = clock();
  cout<< double(last - start) / CLOCKS_PER_SEC <<endl;
  return 0;
}
3,719
#include "includes.h" __global__ void ComputeCubes2Kernel( float *pointsCoordinates, float *vertexData, int quadOffset, float cubeSide, float *cubeOperation, float *cubeTexCoordinates, int *activityFlag, float textureWidth, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < maxCells * 6) { int cellId = threadId / 6; float fCellId = (float)cellId; int sideId = threadId % 6; float x = pointsCoordinates[cellId * 3]; float y = pointsCoordinates[cellId * 3 + 1]; float z = pointsCoordinates[cellId * 3 + 2]; float halfSide = (activityFlag[cellId] == 1) * 0.50f * cubeSide; int textureOffset = quadOffset + maxCells * 4 * 6 * 3; float textureAbsLength = (float)maxCells * textureWidth; vertexData[quadOffset + cellId * 72 + 12*sideId] = x + operationMaskConstant[12*sideId] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 1] = y + operationMaskConstant[12*sideId + 1] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 2] = z + operationMaskConstant[12*sideId + 2] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 3] = x + operationMaskConstant[12*sideId + 3] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 4] = y + operationMaskConstant[12*sideId + 4] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 5] = z + operationMaskConstant[12*sideId + 5] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 6] = x + operationMaskConstant[12*sideId + 6] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 7] = y + operationMaskConstant[12*sideId + 7] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 8] = z + operationMaskConstant[12*sideId + 8] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 9] = x + operationMaskConstant[12*sideId + 9] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 10] = y + operationMaskConstant[12*sideId + 10] * 
halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 11] = z + operationMaskConstant[12*sideId + 11] * halfSide; vertexData[textureOffset + cellId * 48 + 8 * sideId] = ((fCellId + cubeTexCoordinatesConstant[sideId * 8])* textureWidth) / textureAbsLength; vertexData[textureOffset + cellId * 48 + 8 * sideId + 1] = cubeTexCoordinatesConstant[sideId * 8 + 1]; vertexData[textureOffset + cellId * 48 + 8 * sideId + 2] = ((fCellId + cubeTexCoordinatesConstant[sideId * 8 + 2]) * textureWidth) / textureAbsLength; vertexData[textureOffset + cellId * 48 + 8 * sideId + 3] = cubeTexCoordinatesConstant[sideId * 8 + 3]; vertexData[textureOffset + cellId * 48 + 8 * sideId + 4] = ((fCellId + cubeTexCoordinatesConstant[sideId * 8 + 4]) * textureWidth) / textureAbsLength; vertexData[textureOffset + cellId * 48 + 8 * sideId + 5] = cubeTexCoordinatesConstant[sideId * 8 + 5]; vertexData[textureOffset + cellId * 48 + 8 * sideId + 6] = ((fCellId + cubeTexCoordinatesConstant[sideId * 8 + 6]) * textureWidth) / textureAbsLength; vertexData[textureOffset + cellId * 48 + 8 * sideId + 7] = cubeTexCoordinatesConstant[sideId * 8 + 7]; /* vertexData[quadOffset + cellId * 72 + 12*sideId] = x + cubeOperation[12*sideId] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 1] = y + cubeOperation[12*sideId + 1] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 2] = z + cubeOperation[12*sideId + 2] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 3] = x + cubeOperation[12*sideId + 3] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 4] = y + cubeOperation[12*sideId + 4] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 5] = z + cubeOperation[12*sideId + 5] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 6] = x + cubeOperation[12*sideId + 6] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 7] = y + cubeOperation[12*sideId + 7] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 8] = z + 
cubeOperation[12*sideId + 8] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 9] = x + cubeOperation[12*sideId + 9] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 10] = y + cubeOperation[12*sideId + 10] * halfSide; vertexData[quadOffset + cellId * 72 + 12*sideId + 11] = z + cubeOperation[12*sideId + 11] * halfSide; vertexData[textureOffset + cellId * 48 + 8 * sideId] = ((fCellId + cubeTexCoordinates[sideId * 8])* textureWidth) / textureAbsLength; vertexData[textureOffset + cellId * 48 + 8 * sideId + 1] = cubeTexCoordinates[sideId * 8 + 1]; vertexData[textureOffset + cellId * 48 + 8 * sideId + 2] = ((fCellId + cubeTexCoordinates[sideId * 8 + 2]) * textureWidth) / textureAbsLength; vertexData[textureOffset + cellId * 48 + 8 * sideId + 3] = cubeTexCoordinates[sideId * 8 + 3]; vertexData[textureOffset + cellId * 48 + 8 * sideId + 4] = ((fCellId + cubeTexCoordinates[sideId * 8 + 4]) * textureWidth) / textureAbsLength; vertexData[textureOffset + cellId * 48 + 8 * sideId + 5] = cubeTexCoordinates[sideId * 8 + 5]; vertexData[textureOffset + cellId * 48 + 8 * sideId + 6] = ((fCellId + cubeTexCoordinates[sideId * 8 + 6]) * textureWidth) / textureAbsLength; vertexData[textureOffset + cellId * 48 + 8 * sideId + 7] = cubeTexCoordinates[sideId * 8 + 7]; */ } }
3,720
#include "includes.h" __global__ void CopyRectangleKernel( float *src, int srcOffset, int srcWidth, int srcRectX, int srcRectY, int rectWidth, int rectHeight, float *dest, int destOffset, int destWidth, int destRectX, int destRectY ) { int id = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; int size = rectWidth * rectHeight; if (id < size) { int localX = id % rectWidth; int localY = id / rectWidth; int srcPixelX = srcRectX + localX; int srcPixelY = srcRectY + localY; int destPixelX = destRectX + localX; int destPixelY = destRectY + localY; (dest + destOffset)[destPixelX + destPixelY * destWidth] = (src + srcOffset)[srcPixelX + srcPixelY * srcWidth]; } }
3,721
#include <stdio.h> #include <stdlib.h> __device__ int smallerDst (int a, int b){ if(a < b){ return a; } else { return b; } } /*ending of device funtion */ __global__ void strongestNeighborScan_gpu(int * src, int * oldDst, int * newDst, int * oldWeight, int * newWeight, int * madeChanges, int distance, int numEdges) { /*YOUR CODE HERE*/ int i; int tid = blockIdx.x * blockDim.x + threadIdx.x; int total_threads = blockDim.x * gridDim.x; for(i = tid; i < numEdges; i += total_threads){ if(tid >= numEdges){ return; } if (src[i] == src[i-distance]){ /* if the element is in the same segment */ if(oldWeight[i] == oldWeight[i-distance]){ /*if the two weights are equal */ newDst[i] = smallerDst (oldDst[i], oldDst[i-distance]); newWeight[i] = oldWeight[i]; } /*second if statement */ else { newWeight[i] = max (oldWeight[i], oldWeight[i-distance]); if (newWeight[i] == oldWeight[i]) newDst[i] = oldDst[i]; if (newWeight[i] == oldWeight[i-distance]) newDst[i] = oldDst[i-distance]; } } /*first if statement */ else { newWeight[i] = oldWeight[i]; /* when the element is in a different segment, it takes its old weight as the new weight */ newDst[i] = oldDst[i]; } if(oldDst[i] != newDst[i]){ * madeChanges = 1; } } /*ending of for loop */ } /*ending of main */
3,722
#include "includes.h" __global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx == 0) sizes[idx] = accumulatedSize[0]; else if (idx < size) { sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1]; } }
3,723
#include <iostream>
#include <stdio.h>
#include <cuda.h>

#define BLOCK_SIZE 1024

#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)

// Abort with a readable location + message when a CUDA runtime call fails.
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
  if (err != cudaSuccess) {
    std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
    std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
    exit(1);
  }
}

#define cudaCheckErrors(msg) \
  do { \
    cudaError_t __err = cudaGetLastError(); \
    if (__err != cudaSuccess) { \
      fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
              msg, cudaGetErrorString(__err), \
              __FILE__, __LINE__); \
      fprintf(stderr, "*** FAILED - ABORTING\n"); \
      exit(1); \
    } \
  } while (0)

/*
 * Per-block tree reduction computing a block-local max (over d_in1) and min
 * (over d_in2) in place; thread 0 of each block publishes its partials to
 * d_max/d_min. Both input buffers are clobbered.
 *
 * Fix vs. original: thread 0 now only writes its partial when its element is
 * in range (ft_id < nb), so a tail block past the data cannot read d_in1/d_in2
 * out of bounds.
 *
 * NOTE(review): the odd-size handling compares the *global* index expression
 * ft_id + s + s against the *per-block* size - 1; it can only fire for the
 * first block — verify against the original author's intent.
 */
__global__ void max_min_cuda(float *d_in1, float *d_in2, float *d_max, float *d_min, size_t nb) {
  int ft_id = threadIdx.x + blockDim.x * blockIdx.x;
  int tid = threadIdx.x;
  // Number of valid elements handled by this block (tail block is partial).
  int size = (blockIdx.x == gridDim.x - 1) ? (nb % blockDim.x) : blockDim.x;
  for (size_t s = blockDim.x / 2; s > 0; s >>= 1) {
    if (ft_id + s < nb && tid < s) {
      d_in1[ft_id] = (d_in1[ft_id] > d_in1[ft_id + s]) ? d_in1[ft_id] : d_in1[ft_id + s];
      if (size % 2 == 1 && ft_id + s + s == size - 1)
        d_in1[ft_id] = (d_in1[ft_id] > d_in1[ft_id + s + s]) ? d_in1[ft_id] : d_in1[ft_id + s + s];
      d_in2[ft_id] = (d_in2[ft_id] < d_in2[ft_id + s]) ? d_in2[ft_id] : d_in2[ft_id + s];
      if (size % 2 == 1 && ft_id + s + s == size - 1)
        d_in2[ft_id] = (d_in2[ft_id] < d_in2[ft_id + s + s]) ? d_in2[ft_id] : d_in2[ft_id + s + s];
    }
    __syncthreads();
    size /= 2;
  }
  if (tid == 0 && ft_id < nb) {
    d_max[blockIdx.x] = d_in1[ft_id];
    d_min[blockIdx.x] = d_in2[ft_id];
  }
}

/*
 * Computes the min and max of h_values[0..size) on the GPU: per-block
 * partials via max_min_cuda, final combine on the host.
 *
 * Fixes vs. original:
 *  - grid_size was size/BLOCK_SIZE + 1, which launches an extra, completely
 *    empty block whenever size is a multiple of BLOCK_SIZE; ceil-division is
 *    used instead.
 *  - cudaFree was called on the *addresses of the host pointer variables*
 *    (float**), so every device buffer leaked; the device pointers
 *    themselves are now freed.
 *  - the host partial arrays (maxs/mins) were malloc'd but never freed.
 *  - size == 0 is rejected early instead of launching a 0-block grid and
 *    reading uninitialized partials.
 */
void max_min(float *h_values, size_t size, float &h_min, float &h_max) {
  if (size == 0)
    return;   // nothing to reduce; outputs left untouched

  size_t grid_size = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;   // ceil-div

  float *d_values;
  float *d_values2;
  float *maxs = (float *)malloc(sizeof(float) * grid_size);
  float *d_max;
  float *mins = (float *)malloc(sizeof(float) * grid_size);
  float *d_min;

  // malloc values and max
  checkCudaErrors(cudaMalloc(&d_values, sizeof(float) * size));
  checkCudaErrors(cudaMalloc(&d_values2, sizeof(float) * size));
  checkCudaErrors(cudaMalloc(&d_max, sizeof(float) * grid_size));
  checkCudaErrors(cudaMalloc(&d_min, sizeof(float) * grid_size));

  // memcopy values (two copies: the kernel destroys its inputs)
  checkCudaErrors(cudaMemcpy(d_values, h_values, sizeof(float) * size, cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMemcpy(d_values2, h_values, sizeof(float) * size, cudaMemcpyHostToDevice));

  // kernel
  max_min_cuda<<<grid_size, BLOCK_SIZE>>>(d_values, d_values2, d_max, d_min, size);
  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());

  // memcpy per-block partial results
  checkCudaErrors(cudaMemcpy(maxs, d_max, sizeof(float) * grid_size, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(mins, d_min, sizeof(float) * grid_size, cudaMemcpyDeviceToHost));

  // host-side combine of the per-block partials
  h_min = mins[0];
  h_max = maxs[0];
  for (size_t i = 0; i < grid_size; i++) {
    if (h_max < maxs[i])
      h_max = maxs[i];
    if (h_min > mins[i])
      h_min = mins[i];
  }

  // release device and host scratch buffers
  cudaFree(d_max);
  cudaFree(d_min);
  cudaFree(d_values);
  cudaFree(d_values2);
  free(maxs);
  free(mins);
}
3,724
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)

/* Abort if the most recent CUDA runtime call failed. */
void check_error (const char* message) {
	cudaError_t error = cudaGetLastError ();
	if (error != cudaSuccess) {
		printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
		exit(-1);
	}
}

/*
 * curvi: curvilinear-grid elastic stencil accumulating cross-derivative
 * contributions into r1. Each thread owns one (j,k) column and marches over
 * the interior i range; c1/c2 are 4th-order finite-difference weights.
 *
 * NOTE(review): the flat inputs are reinterpreted as fixed [304][304]-strided
 * 3-D arrays, so the kernel is only correct when the padded problem dimension
 * is exactly 304 — confirm against the caller.
 * NOTE(review): a few stretch factors look asymmetric relative to their
 * mirror terms (e.g. strx[i+2]/strx[i-2] on the k+1/k-1 planes and
 * stry[j+1]/stry[j-1]/stry[j+2]/stry[j-2] where the partner terms use
 * strx[i]/stry[j]) — possibly transcription typos; verify against the
 * reference implementation before relying on the results.
 */
__global__ void curvi (float * __restrict__ r1, float *__restrict__ in_u1, float * __restrict__ in_u2, float *__restrict__ in_u3, float * __restrict__ in_mu, float * __restrict__ in_la, float * __restrict__ in_met1, float * __restrict__ in_met2, float * __restrict__ in_met3, float * __restrict__ in_met4, float * __restrict__ strx, float * __restrict__ stry, float c1, float c2, int N) {
	//Determing the block's indices
	int blockdim_k= (int)(blockDim.x);
	int k0 = (int)(blockIdx.x)*(blockdim_k);
	int k = max (k0, 0) + (int)(threadIdx.x);
	int blockdim_j= (int)(blockDim.y);
	int j0 = (int)(blockIdx.y)*(blockdim_j);
	int j = max (j0, 0) + (int)(threadIdx.y);

	// Reinterpret the flat buffers as 304x304-strided 3-D arrays.
	float (*u1)[304][304] = (float (*)[304][304])in_u1;
	float (*u2)[304][304] = (float (*)[304][304])in_u2;
	float (*u3)[304][304] = (float (*)[304][304])in_u3;
	float (*mu)[304][304] = (float (*)[304][304])in_mu;
	float (*la)[304][304] = (float (*)[304][304])in_la;
	float (*met1)[304][304] = (float (*)[304][304])in_met1;
	float (*met2)[304][304] = (float (*)[304][304])in_met2;
	float (*met3)[304][304] = (float (*)[304][304])in_met3;
	float (*met4)[304][304] = (float (*)[304][304])in_met4;

	// Interior points only (two-cell halo on every side).
	if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
		for (int i=2; i<=N-3; i++) {
			// Term 1: mixed i/j derivatives of u2.
			r1[i*N*N+j*N+k] += c2*(
				mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*(
					c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) +
					c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) ) +
				mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*(
					c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+
					c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) )
				) +
				c2*(
				la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*(
					c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) +
					c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) ) +
				la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*(
					c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+
					c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) )
				) +
				c1*(
				mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*(
					c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) +
					c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) ) +
				mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*(
					c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) +
					c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k]))) +
				c1*(
				la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*(
					c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) +
					c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) ) +
				la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*(
					c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) +
					c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k])));

			// Term 2: i-derivatives of u1/u2/u3 evaluated on the k+/-2 and
			// k+/-1 planes.
			r1[i*N*N+j*N+k] += c2*(
				(2*mu[i][j][k+2]+la[i][j][k+2])*met2[i][j][k+2]*met1[i][j][k+2]*(
					c2*(u1[i+2][j][k+2]-u1[i-2][j][k+2]) +
					c1*(u1[i+1][j][k+2]-u1[i-1][j][k+2]) )*strx[i]*stry[j] +
				mu[i][j][k+2]*met3[i][j][k+2]*met1[i][j][k+2]*(
					c2*(u2[i+2][j][k+2]-u2[i-2][j][k+2]) +
					c1*(u2[i+1][j][k+2]-u2[i-1][j][k+2]) ) +
				mu[i][j][k+2]*met4[i][j][k+2]*met1[i][j][k+2]*(
					c2*(u3[i+2][j][k+2]-u3[i-2][j][k+2]) +
					c1*(u3[i+1][j][k+2]-u3[i-1][j][k+2]) )*stry[j] +
				((2*mu[i][j][k-2]+la[i][j][k-2])*met2[i][j][k-2]*met1[i][j][k-2]*(
					c2*(u1[i+2][j][k-2]-u1[i-2][j][k-2]) +
					c1*(u1[i+1][j][k-2]-u1[i-1][j][k-2]) )*strx[i]*stry[j] +
				mu[i][j][k-2]*met3[i][j][k-2]*met1[i][j][k-2]*(
					c2*(u2[i+2][j][k-2]-u2[i-2][j][k-2]) +
					c1*(u2[i+1][j][k-2]-u2[i-1][j][k-2]) ) +
				mu[i][j][k-2]*met4[i][j][k-2]*met1[i][j][k-2]*(
					c2*(u3[i+2][j][k-2]-u3[i-2][j][k-2]) +
					c1*(u3[i+1][j][k-2]-u3[i-1][j][k-2]) )*stry[j] )
				) + c1*(
				(2*mu[i][j][k+1]+la[i][j][k+1])*met2[i][j][k+1]*met1[i][j][k+1]*(
					c2*(u1[i+2][j][k+1]-u1[i-2][j][k+1]) +
					c1*(u1[i+1][j][k+1]-u1[i-1][j][k+1]) )*strx[i+2]*stry[j] +
				mu[i][j][k+1]*met3[i][j][k+1]*met1[i][j][k+1]*(
					c2*(u2[i+2][j][k+1]-u2[i-2][j][k+1]) +
					c1*(u2[i+1][j][k+1]-u2[i-1][j][k+1]) ) +
				mu[i][j][k+1]*met4[i][j][k+1]*met1[i][j][k+1]*(
					c2*(u3[i+2][j][k+1]-u3[i-2][j][k+1]) +
					c1*(u3[i+1][j][k+1]-u3[i-1][j][k+1]) )*stry[j] +
				((2*mu[i][j][k-1]+la[i][j][k-1])*met2[i][j][k-1]*met1[i][j][k-1]*(
					c2*(u1[i+2][j][k-1]-u1[i-2][j][k-1]) +
					c1*(u1[i+1][j][k-1]-u1[i-1][j][k-1]) )*strx[i-2]*stry[j] +
				mu[i][j][k-1]*met3[i][j][k-1]*met1[i][j][k-1]*(
					c2*(u2[i+2][j][k-1]-u2[i-2][j][k-1]) +
					c1*(u2[i+1][j][k-1]-u2[i-1][j][k-1]) ) +
				mu[i][j][k-1]*met4[i][j][k-1]*met1[i][j][k-1]*(
					c2*(u3[i+2][j][k-1]-u3[i-2][j][k-1]) +
					c1*(u3[i+1][j][k-1]-u3[i-1][j][k-1]) )*stry[j] ) );

			// Term 3: k-derivatives evaluated on the i+/-2 and i+/-1 planes,
			// scaled by stry[j].
			r1[i*N*N+j*N+k] += ( c2*(
				(2*mu[i+2][j][k]+la[i+2][j][k])*met2[i+2][j][k]*met1[i+2][j][k]*(
					c2*(u1[i+2][j][k+2]-u1[i+2][j][k-2]) +
					c1*(u1[i+2][j][k+1]-u1[i+2][j][k-1]) )*strx[i] +
				la[i+2][j][k]*met3[i+2][j][k]*met1[i+2][j][k]*(
					c2*(u2[i+2][j][k+2]-u2[i+2][j][k-2]) +
					c1*(u2[i+2][j][k+1]-u2[i+2][j][k-1]) )*stry[j] +
				la[i+2][j][k]*met4[i+2][j][k]*met1[i+2][j][k]*(
					c2*(u3[i+2][j][k+2]-u3[i+2][j][k-2]) +
					c1*(u3[i+2][j][k+1]-u3[i+2][j][k-1]) ) +
				((2*mu[i-2][j][k]+la[i-2][j][k])*met2[i-2][j][k]*met1[i-2][j][k]*(
					c2*(u1[i-2][j][k+2]-u1[i-2][j][k-2]) +
					c1*(u1[i-2][j][k+1]-u1[i-2][j][k-1]) )*strx[i] +
				la[i-2][j][k]*met3[i-2][j][k]*met1[i-2][j][k]*(
					c2*(u2[i-2][j][k+2]-u2[i-2][j][k-2]) +
					c1*(u2[i-2][j][k+1]-u2[i-2][j][k-1]) )*stry[j] +
				la[i-2][j][k]*met4[i-2][j][k]*met1[i-2][j][k]*(
					c2*(u3[i-2][j][k+2]-u3[i-2][j][k-2]) +
					c1*(u3[i-2][j][k+1]-u3[i-2][j][k-1]) ) ) ) +
				c1*(
				(2*mu[i+1][j][k]+la[i+1][j][k])*met2[i+1][j][k]*met1[i+1][j][k]*(
					c2*(u1[i+1][j][k+2]-u1[i+1][j][k-2]) +
					c1*(u1[i+1][j][k+1]-u1[i+1][j][k-1]) )*strx[i] +
				la[i+1][j][k]*met3[i+1][j][k]*met1[i+1][j][k]*(
					c2*(u2[i+1][j][k+2]-u2[i+1][j][k-2]) +
					c1*(u2[i+1][j][k+1]-u2[i+1][j][k-1]) )*stry[j] +
				la[i+1][j][k]*met4[i+1][j][k]*met1[i+1][j][k]*(
					c2*(u3[i+1][j][k+2]-u3[i+1][j][k-2]) +
					c1*(u3[i+1][j][k+1]-u3[i+1][j][k-1]) ) +
				((2*mu[i-1][j][k]+la[i-1][j][k])*met2[i-1][j][k]*met1[i-1][j][k]*(
					c2*(u1[i-1][j][k+2]-u1[i-1][j][k-2]) +
					c1*(u1[i-1][j][k+1]-u1[i-1][j][k-1]) )*strx[i] +
				la[i-1][j][k]*met3[i-1][j][k]*met1[i-1][j][k]*(
					c2*(u2[i-1][j][k+2]-u2[i-1][j][k-2]) +
					c1*(u2[i-1][j][k+1]-u2[i-1][j][k-1]) )*stry[j] +
				la[i-1][j][k]*met4[i-1][j][k]*met1[i-1][j][k]*(
					c2*(u3[i-1][j][k+2]-u3[i-1][j][k-2]) +
					c1*(u3[i-1][j][k+1]-u3[i-1][j][k-1]) ) ) ) )*stry[j];

			// Term 4: k-derivatives on the j+/-2 and j+/-1 planes.
			r1[i*N*N+j*N+k] += c2*(
				mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*(
					c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) +
					c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i] +
				mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*(
					c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) +
					c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) ) +
				( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*(
					c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) +
					c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i] +
				mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*(
					c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) +
					c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) ) ) +
				c1*(
				mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*(
					c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) +
					c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i] +
				mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*(
					c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) +
					c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) ) +
				( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*(
					c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) +
					c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i] +
				mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*(
					c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) +
					c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) );

			// Term 5: j-derivatives on the k+/-2 and k+/-1 planes.
			r1[i*N*N+j*N+k] += c2*(
				mu[i][j][k+2]*met3[i][j][k+2]*met1[i][j][k+2]*(
					c2*(u1[i][j+2][k+2]-u1[i][j-2][k+2]) +
					c1*(u1[i][j+1][k+2]-u1[i][j-1][k+2]) )*stry[j+2]*strx[i] +
				la[i][j][k+2]*met2[i][j][k+2]*met1[i][j][k+2]*(
					c2*(u2[i][j+2][k+2]-u2[i][j-2][k+2]) +
					c1*(u2[i][j+1][k+2]-u2[i][j-1][k+2]) ) +
				( mu[i][j][k-2]*met3[i][j][k-2]*met1[i][j][k-2]*(
					c2*(u1[i][j+2][k-2]-u1[i][j-2][k-2]) +
					c1*(u1[i][j+1][k-2]-u1[i][j-1][k-2]) )*stry[j]*strx[i] +
				la[i][j][k-2]*met2[i][j][k-2]*met1[i][j][k-2]*(
					c2*(u2[i][j+2][k-2]-u2[i][j-2][k-2]) +
					c1*(u2[i][j+1][k-2]-u2[i][j-1][k-2]) ) ) ) +
				c1*(
				mu[i][j][k+1]*met3[i][j][k+1]*met1[i][j][k+1]*(
					c2*(u1[i][j+2][k+1]-u1[i][j-2][k+1]) +
					c1*(u1[i][j+1][k+1]-u1[i][j-1][k+1]) )*stry[j-2]*strx[i] +
				la[i][j][k+1]*met2[i][j][k+1]*met1[i][j][k+1]*(
					c2*(u2[i][j+2][k+1]-u2[i][j-2][k+1]) +
					c1*(u2[i][j+1][k+1]-u2[i][j-1][k+1]) ) +
				( mu[i][j][k-1]*met3[i][j][k-1]*met1[i][j][k-1]*(
					c2*(u1[i][j+2][k-1]-u1[i][j-2][k-1]) +
					c1*(u1[i][j+1][k-1]-u1[i][j-1][k-1]) )*stry[j]*strx[i] +
				la[i][j][k-1]*met2[i][j][k-1]*met1[i][j][k-1]*(
					c2*(u2[i][j+2][k-1]-u2[i][j-2][k-1]) +
					c1*(u2[i][j+1][k-1]-u2[i][j-1][k-1]) ) ) );
		}
	}
}

/*
 * host_code: allocates device copies of every field, runs the curvi kernel
 * once, and copies the result r1 back to h_r1.
 *
 * Fixes vs. original: the kernel launch is now error-checked, and all device
 * buffers are freed before returning (the original leaked 10 N^3 buffers and
 * 2 N-sized buffers on every call).
 */
extern "C" void host_code (float *h_r1, float *h_u1, float *h_u2, float *h_u3, float *h_mu, float *h_la, float *h_met1, float *h_met2, float *h_met3, float *h_met4, float *h_strx, float *h_stry, float c1, float c2, int N) {
	float *r1;
	cudaMalloc (&r1, sizeof(float)*N*N*N);
	check_error ("Failed to allocate device memory for r1\n");
	cudaMemcpy (r1, h_r1, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
	float *u1;
	cudaMalloc (&u1, sizeof(float)*N*N*N);
	check_error ("Failed to allocate device memory for u1\n");
	cudaMemcpy (u1, h_u1, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
	float *u2;
	cudaMalloc (&u2, sizeof(float)*N*N*N);
	check_error ("Failed to allocate device memory for u2\n");
	cudaMemcpy (u2, h_u2, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
	float *u3;
	cudaMalloc (&u3, sizeof(float)*N*N*N);
	check_error ("Failed to allocate device memory for u3\n");
	cudaMemcpy (u3, h_u3, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
	float *mu;
	cudaMalloc (&mu, sizeof(float)*N*N*N);
	check_error ("Failed to allocate device memory for mu\n");
	cudaMemcpy (mu, h_mu, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
	float *la;
	cudaMalloc (&la, sizeof(float)*N*N*N);
	check_error ("Failed to allocate device memory for la\n");
	cudaMemcpy (la, h_la, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
	float *met1;
	cudaMalloc (&met1, sizeof(float)*N*N*N);
	check_error ("Failed to allocate device memory for met1\n");
	cudaMemcpy (met1, h_met1, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
	float *met2;
	cudaMalloc (&met2, sizeof(float)*N*N*N);
	check_error ("Failed to allocate device memory for met2\n");
	cudaMemcpy (met2, h_met2, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
	float *met3;
	cudaMalloc (&met3, sizeof(float)*N*N*N);
	check_error ("Failed to allocate device memory for met3\n");
	cudaMemcpy (met3, h_met3, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
	float *met4;
	cudaMalloc (&met4, sizeof(float)*N*N*N);
	check_error ("Failed to allocate device memory for met4\n");
	cudaMemcpy (met4, h_met4, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
	float *strx;
	cudaMalloc (&strx, sizeof(float)*N);
	check_error ("Failed to allocate device memory for strx\n");
	cudaMemcpy (strx, h_strx, sizeof(float)*N, cudaMemcpyHostToDevice);
	float *stry;
	cudaMalloc (&stry, sizeof(float)*N);
	check_error ("Failed to allocate device memory for stry\n");
	cudaMemcpy (stry, h_stry, sizeof(float)*N, cudaMemcpyHostToDevice);

	dim3 blockconfig (32, 8);
	dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
	curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
	check_error ("curvi kernel launch failed\n");

	cudaMemcpy (h_r1, r1, sizeof(float)*N*N*N, cudaMemcpyDeviceToHost);

	// Release every device buffer (the original leaked all of them).
	cudaFree (r1);
	cudaFree (u1);
	cudaFree (u2);
	cudaFree (u3);
	cudaFree (mu);
	cudaFree (la);
	cudaFree (met1);
	cudaFree (met2);
	cudaFree (met3);
	cudaFree (met4);
	cudaFree (strx);
	cudaFree (stry);
}
3,725
/*************************************************************************** ************************************************************************** Spherical Harmonic Transform Kit 2.7 Copyright 1997-2003 Sean Moore, Dennis Healy, Dan Rockmore, Peter Kostelec Copyright 2004 Peter Kostelec, Dan Rockmore This file is part of SpharmonicKit. SpharmonicKit is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. SpharmonicKit is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. See the accompanying LICENSE file for details. ************************************************************************ ************************************************************************/ /* quadrature weights file */ /** contains precomputed arrays of Legendre quadrature weight values for ODD order !!! The sin factor is already present here !!! In a nutshell, the purpose of precomputing these weights is to save a multiplication in seminaivex.c (when the order is odd). **/ /*** The interface function to these arrays is defined at the end of this file. 
It is const double *get_oddweights( int bw ) ***/ __device__ double ow4[8] = {0.0130677244459852, 0.1238854579060207, 0.2695229693546575, 0.3784620915132186, 0.3784620915135136, 0.2695229693547305, 0.1238854579060396, 0.0130677244459872}; __device__ double ow8[16] = {0.0016469580173477, 0.0169341816445308, 0.0432357135521227, 0.0793813894095676, 0.1170279554057073, 0.1529422668923013, 0.1796652878558445, 0.1945100966313875, 0.1945100966320044, 0.1796652878560336, 0.1529422668923990, 0.1170279554057610, 0.0793813894095944, 0.0432357135521349, 0.0169341816445349, 0.0016469580173482}; __device__ double ow16[32] = {0.0002062930740058, 0.0021640291839523, 0.0057415678114794, 0.0111972504895041, 0.0178930728089408, 0.0259990600279925, 0.0347897736331906, 0.0443206791778128, 0.0538581883411703, 0.0633727713806255, 0.0721958552759619, 0.0802540295805748, 0.0870119991694323, 0.0923933475135159, 0.0960521727207131, 0.0979413816063274, 0.0979413816075809, 0.0960521727211216, 0.0923933475137517, 0.0870119991695906, 0.0802540295806897, 0.0721958552760469, 0.0633727713806883, 0.0538581883412164, 0.0443206791778439, 0.0347897736332128, 0.0259990600280075, 0.0178930728089499, 0.0111972504895093, 0.0057415678114816, 0.0021640291839533, 0.0002062930740059}; __device__ double ow32[64] = {0.0000257998762313, 0.0002719963328992, 0.0007285551655023, 0.0014419143849654, 0.0023492196299524, 0.0034990775392484, 0.0048228709609411, 0.0063650580023922, 0.0080542400902157, 0.0099298000591963, 0.0119191123704765, 0.0140563265952599, 0.0162689596309466, 0.0185860547971697, 0.0209366261496682, 0.0233449007140944, 0.0257427470523766, 0.0281499721421785, 0.0305026394644358, 0.0328165981698009, 0.0350333990269573, 0.0371654265132021, 0.0391609283963208, 0.0410293162611077, 0.0427266274173210, 0.0442597612670725, 0.0455934877956728, 0.0467325973884478, 0.0476513580258637, 0.0483527742646833, 0.0488211762339878, 0.0490580082650788, 0.0490580082676000, 0.0488211762348225, 0.0483527742651796, 
0.0476513580262125, 0.0467325973887132, 0.0455934877958839, 0.0442597612672463, 0.0427266274174657, 0.0410293162612303, 0.0391609283964271, 0.0371654265132945, 0.0350333990270351, 0.0328165981698689, 0.0305026394644934, 0.0281499721422278, 0.0257427470524194, 0.0233449007141283, 0.0209366261496963, 0.0185860547971938, 0.0162689596309663, 0.0140563265952755, 0.0119191123704894, 0.0099298000592062, 0.0080542400902239, 0.0063650580023980, 0.0048228709609452, 0.0034990775392512, 0.0023492196299544, 0.0014419143849666, 0.0007285551655031, 0.0002719963328996, 0.0000257998762314}; __device__ double ow64[128] = {0.0000032253984437, 0.0000340463131158, 0.0000914113258540, 0.0001815794657534, 0.0002972579769116, 0.0004454616309845, 0.0006184538564501, 0.0008232297879527, 0.0010518793946314, 0.0013112566599011, 0.0015933553489263, 0.0019048449349610, 0.0022376655422937, 0.0025982788787924, 0.0029786044203960, 0.0033848806292108, 0.0038090361826578, 0.0042570748486767, 0.0047209633097769, 0.0052064617945353, 0.0057056035126863, 0.0062238982588875, 0.0067534742806377, 0.0072995856431080, 0.0078544841887785, 0.0084231643337913, 0.0089980300767539, 0.0095838134769448, 0.0101730991587936, 0.0107703551919536, 0.0113683750802881, 0.0119713622227730, 0.0125723468987302, 0.0131752679901405, 0.0137734199390811, 0.0143704779852115, 0.0149600274557836, 0.0155454814319868, 0.0161207420259307, 0.0166889621432480, 0.0172443856007590, 0.0177899075024394, 0.0183201371555761, 0.0188377145219606, 0.0193376369013747, 0.0198222919565129, 0.0202870860544883, 0.0207341574880596, 0.0211593412034746, 0.0215645290465210, 0.0219460023643870, 0.0223054093866584, 0.0226394938764397, 0.0229496631067230, 0.0232331373589767, 0.0234910853670587, 0.0237212160271207, 0.0239244616468889, 0.0240990297467417, 0.0242456179638030, 0.0243629402984804, 0.0244514610722414, 0.0245104064147662, 0.0245400082524635, 0.0245400082575181, 0.0245104064164491, 0.0244514610732485, 0.0243629402991958, 0.0242456179643576, 
0.0240990297471916, 0.0239244616472670,
0.0237212160274452, 0.0234910853673425, 0.0232331373592268,
0.0229496631069464, 0.0226394938766414, 0.0223054093868395,
0.0219460023645526, 0.0215645290466719, 0.0211593412036139,
0.0207341574881874, 0.0202870860546083, 0.0198222919566235,
0.0193376369014777, 0.0188377145220570, 0.0183201371556643,
0.0177899075025212, 0.0172443856008336, 0.0166889621433190,
0.0161207420259947, 0.0155454814320465, 0.0149600274558390,
0.0143704779852628, 0.0137734199391291, 0.0131752679901853,
0.0125723468987703, 0.0119713622228087, 0.0113683750803208,
0.0107703551919828, 0.0101730991588207, 0.0095838134769692,
0.0089980300767763, 0.0084231643338125, 0.0078544841887972,
0.0072995856431249, 0.0067534742806529, 0.0062238982589012,
0.0057056035126984, 0.0052064617945460, 0.0047209633097861,
0.0042570748486851, 0.0038090361826649, 0.0033848806292171,
0.0029786044204013, 0.0025982788787973, 0.0022376655422979,
0.0019048449349640, 0.0015933553489289, 0.0013112566599029,
0.0010518793946331, 0.0008232297879540, 0.0006184538564511,
0.0004454616309852, 0.0002972579769120, 0.0001815794657536,
0.0000914113258541, 0.0000340463131159, 0.0000032253984437};

/************************************************************************/
/* Returns a pointer to the proper quadrature weights for a given
   bandwidth bw, IF THE ORDER IS ODD. The weights array for bandwidth
   bw has 2*bw entries. A closed form for the weights can be found in
   the original Driscoll and Healy paper, but they need to be
   normalized. Returns 0 (NULL) for an unsupported bandwidth. */

__device__ double *get_oddweights( int bw )
{
  switch(bw)
    {
    case 4: return ow4;
    case 8: return ow8;
    case 16: return ow16;
    case 32: return ow32;
    /* BUG FIX: ow64[128] is defined above but the original switch had no
       case for it, so bw == 64 fell through to NULL despite the table
       being present. */
    case 64: return ow64;
    default: return 0;
    }
}
3,726
#include <iostream>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <stdio.h>

using namespace std;

// Row-major matrix: element (row, col) lives at data[row * width + col].
typedef struct {
    int width;
    int height;
    float* data;
} Matrix;

#define BLOCK_SIZE 2

__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void showMatrix(Matrix m);

// Host wrapper: uploads A and B to the device, computes C = A x B on the
// GPU, downloads the result into C.data, frees the device buffers and
// prints C. Requires A.width == B.height; C must be A.height x B.width.
void MatMaul(const Matrix A, const Matrix B, Matrix C)
{
    Matrix dA;
    dA.height = A.height;
    dA.width = A.width;
    size_t size = (size_t)dA.width * dA.height * sizeof(float);
    cudaMalloc(&dA.data, size);
    cudaMemcpy(dA.data, A.data, size, cudaMemcpyHostToDevice);

    Matrix dB;
    dB.height = B.height;
    dB.width = B.width;
    size = (size_t)dB.width * dB.height * sizeof(float);
    cudaMalloc(&dB.data, size);
    cudaMemcpy(dB.data, B.data, size, cudaMemcpyHostToDevice);

    Matrix dC;
    dC.height = C.height;
    dC.width = C.width;
    size = (size_t)dC.width * dC.height * sizeof(float);
    cudaMalloc(&dC.data, size);

    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // BUG FIX: the original used plain integer division (B.width / dimBlock.x),
    // which truncates and leaves part of C uncomputed whenever a dimension is
    // not a multiple of BLOCK_SIZE. Use ceil-division instead.
    dim3 dimGrid((dC.width + dimBlock.x - 1) / dimBlock.x,
                 (dC.height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(dA, dB, dC);

    cudaMemcpy(C.data, dC.data, size, cudaMemcpyDeviceToHost);

    cudaFree(dA.data);
    cudaFree(dB.data);
    cudaFree(dC.data);

    showMatrix(C);
}

// One thread per output element of C; guarded so partial edge blocks
// (from the ceil-divided grid) do not write out of bounds.
__global__ void MatMulKernel(const Matrix A, const Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= C.height || col >= C.width)
        return;

    float value = 0.0f;
    for (int i = 0; i < A.width; i++)
        value += A.data[row * A.width + i] * B.data[i * B.width + col];
    C.data[row * C.width + col] = value;
}

// Print a matrix's dimensions followed by its elements, tab-separated.
void showMatrix(Matrix m)
{
    cout << "size(" << m.height << ", " << m.width << ")" << endl;
    for (int i = 0; i < m.height; i++)
    {
        for (int j = 0; j < m.width; j++)
        {
            cout << m.data[i * m.width + j] << "\t";
        }
        cout << endl;
    }
}

int main()
{
    Matrix A;
    Matrix B;
    Matrix C;

    A.height = 4;
    A.width = 2;
    A.data = (float *)malloc(A.width * A.height * sizeof(float));
    for (int i = 0; i < A.height * A.width; i++)
    {
        A.data[i] = i + 1;
    }

    B.height = 2;
    B.width = 4;
    B.data = (float *)malloc(B.width * B.height * sizeof(float));
    for (int i = 0; i < B.height * B.width; i++)
    {
        B.data[i] = 2 * i;
    }

    // BUG FIX: C = A(4x2) * B(2x4) is A.height x B.width. The original
    // assigned C.width = A.height and C.height = B.width (swapped); it only
    // worked by coincidence because both happen to be 4 here.
    C.height = A.height;
    C.width = B.width;
    C.data = (float *)malloc(C.width * C.height * sizeof(float));

    showMatrix(A);
    showMatrix(B);
    MatMaul(A, B, C);
    return 0;
}
3,727
// nvcc jcaobi.cu -o jacobi
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define NBLOCKS 32

/* One Jacobi sweep: x_next[i] = (b[i] - sum_j r[i][j]*x[j]) / d[i], where r
 * holds the matrix with a zeroed diagonal and d holds the diagonal entries.
 *
 * Launch contract: NBLOCKS blocks, with size*sizeof(double) of dynamic shared
 * memory (third <<< >>> argument) for a block-local copy of x. Each thread
 * stages a chunk of NBLOCKS consecutive entries of x into shared memory. */
__global__ void compute_xnext(double *r, double *d, double *x, double *x_next,
                              double *b, int size)
{
    extern __shared__ double x_copy[];   // block-local copy of x for fast reuse

    int row_idx = blockDim.x * blockIdx.x + threadIdx.x;

    // Stage x into shared memory (every thread participates, even those with
    // row_idx >= size, so the barrier below is reached by the whole block).
    int upBound = NBLOCKS * (threadIdx.x + 1);
    if (upBound > size)
        upBound = size;
    for (int idx = NBLOCKS * threadIdx.x; idx < upBound; idx++)
        x_copy[idx] = x[idx];

    /* BUG FIX: the original returned on (row_idx >= size) BEFORE this
     * __syncthreads(). A barrier not reached by all threads of the block is
     * undefined behavior, and the guard triggers whenever blockDim.x does not
     * divide size (which is the common case with size/NBLOCKS + 1 threads). */
    __syncthreads();

    if (row_idx >= size)
        return;

    // Dot product of row row_idx of r with the staged x.
    double y_k = 0.;
    for (int idx = 0; idx < size; idx++)
        y_k += x_copy[idx] * r[row_idx * size + idx];

    x_next[row_idx] = (b[row_idx] - y_k) / d[row_idx];
}

int main(int argc, char *argvs[])
{
    if (argc != 4)
    {
        printf("usage: %s [size] [epsilon] [verbose: 1/2]\n", argvs[0]);
        return 1;
    }

    // init constants & variables
    int nBlocks, nThreadsPerBlock, size, idx1, idx2, niter, epsilon_pow, verbose;
    size = atoi(argvs[1]);
    epsilon_pow = atoi(argvs[2]);
    verbose = atoi(argvs[3]);
    nBlocks = NBLOCKS;
    nThreadsPerBlock = (int) size / nBlocks + 1;
    niter = 0;

    // epsilon = 10^(-epsilon_pow); convergence is tested on squared norms.
    double epsilon = 1.;
    for (idx1 = 0; idx1 < epsilon_pow; idx1++)
        epsilon = epsilon / 10.;
    double epsilon2 = epsilon * epsilon;
    double norm2 = epsilon2 + 1.;   // init above epsilon so the loop runs

    struct timeval t0, t1, t2, t3;
    size_t size_mat = size * size * sizeof(double);
    size_t size_vect = size * sizeof(double);

    // init matrices: r has 1. off-diagonal / 0. on the diagonal, d holds the
    // (strictly dominant) diagonal 2*size+1, x starts at 1., b at 3.
    double *r_mat, *d_vect, *x, *x_next, *b;
    double *d_r, *d_d, *d_x, *d_xnext, *d_b;
    r_mat = (double *) calloc(size * size, sizeof(double));
    d_vect = (double *) calloc(size, sizeof(double));
    x = (double *) calloc(size, sizeof(double));
    x_next = (double *) calloc(size, sizeof(double));
    b = (double *) calloc(size, sizeof(double));
    for (idx1 = 0; idx1 < size; idx1++)
    {
        for (idx2 = 0; idx2 < size; idx2++)
            r_mat[idx1 * size + idx2] = 1.;
        r_mat[idx1 * size + idx1] = 0.;
        d_vect[idx1] = 2. * size + 1.;
        x[idx1] = 1.;
        b[idx1] = 3.;
    }

    // Allocation & copy on device
    gettimeofday(&t0, NULL);
    cudaMalloc(&d_r, size_mat);
    cudaMalloc(&d_d, size_vect);
    cudaMalloc(&d_x, size_vect);
    cudaMalloc(&d_xnext, size_vect);
    cudaMalloc(&d_b, size_vect);
    gettimeofday(&t1, NULL);
    cudaMemcpy(d_r, r_mat, size_mat, cudaMemcpyHostToDevice);
    cudaMemcpy(d_d, d_vect, size_vect, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size_vect, cudaMemcpyHostToDevice);
    gettimeofday(&t2, NULL);

    // Main loop: iterate until ||x_next - x||^2 <= epsilon^2.
    while (norm2 > epsilon2)
    {
        niter++;
        // Send x_k
        cudaMemcpy(d_x, x, size_vect, cudaMemcpyHostToDevice);
        compute_xnext <<<nBlocks, nThreadsPerBlock, size_vect>>>
            (d_r, d_d, d_x, d_xnext, d_b, size);
        // Gather x_k+1
        cudaMemcpy(x_next, d_xnext, size_vect, cudaMemcpyDeviceToHost);
        norm2 = 0.;
        for (idx1 = 0; idx1 < size; idx1++)
        {
            norm2 += (x[idx1] - x_next[idx1]) * (x[idx1] - x_next[idx1]);
            x[idx1] = x_next[idx1];
        }
    }
    gettimeofday(&t3, NULL);

    cudaFree(d_r);
    cudaFree(d_d);
    cudaFree(d_x);
    cudaFree(d_xnext);
    cudaFree(d_b);

    // Verbose
    double t_alloc = (double)(t1.tv_sec - t0.tv_sec) + \
        (double)(t1.tv_usec - t0.tv_usec) / 1000000;
    double t_trans = (double)(t2.tv_sec - t1.tv_sec) + \
        (double)(t2.tv_usec - t1.tv_usec) / 1000000;
    double t_calc = (double)(t3.tv_sec - t2.tv_sec) + \
        (double)(t3.tv_usec - t2.tv_usec) / 1000000;
    if (verbose == 1)
    {
        printf("N = %d\nEpsilon = %10.9f\n", size, epsilon);
        printf("Nombre d'iterations: %d\n", niter);
        double norm_error = 0.;
        for (int i = 0; i < size; i++)
            norm_error += (x[i] - 1. / size) * (x[i] - 1. / size);
        printf("Error compared to sol.: %25.24f\n\n", norm_error);
        printf("Temps d'alloc. device : %f s\n", t_alloc);
        printf("Temps de transfert init.: %f s\n", t_trans);
        printf("Temps de calcul: %f s\n", t_calc);
    }
    else if (verbose == 2)
        printf("%d, %12.11f, %d, %f\n", size, epsilon, niter, t_calc);

    free(r_mat);
    free(d_vect);
    free(x);
    free(x_next);
    free(b);
    return 0;
}
3,728
#include "includes.h"

//#define ITEM_COUNT 2
#define _PI 3.14159265358979323846
#define _PI2 1.57079632679489661923
#define _RAD 6372795

using namespace std;

cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

/* For each of `count` point pairs (lat/lng in degrees, lat in .x, lng in .y),
 * compute the great-circle distance in metres (d_dist) and the forward
 * azimuth in degrees (d_azimut), using a spherical Earth of radius _RAD.
 *
 * WARNING (pre-existing side effect, kept): d_dot1/d_dot2 are converted to
 * radians IN PLACE, so the input arrays are clobbered by this kernel. */
__global__ void geo_invert(double2* d_dot1, double2* d_dot2, double* d_dist, double* d_azimut, long count)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= count)
        return;

    // degrees -> radians, in place
    d_dot1[idx].x = d_dot1[idx].x * _PI / 180;   // lat1
    d_dot1[idx].y = d_dot1[idx].y * _PI / 180;   // lng1
    d_dot2[idx].x = d_dot2[idx].x * _PI / 180;   // lat2
    d_dot2[idx].y = d_dot2[idx].y * _PI / 180;   // lng2

    double cl1 = cos(d_dot1[idx].x);
    double cl2 = cos(d_dot2[idx].x);
    double sl1 = sin(d_dot1[idx].x);
    double sl2 = sin(d_dot2[idx].x);
    double delta = d_dot2[idx].y - d_dot1[idx].y;   // longitude difference
    double cdelta = cos(delta);
    double sdelta = sin(delta);

    // Central angle via the numerically stable Vincenty-style formula.
    double y = sqrt(pow(cl2 * sdelta, 2) + pow(cl1 * sl2 - sl1 * cl2 * cdelta, 2));
    double x = sl1 * sl2 + cl1 * cl2 * cdelta;
    /* BUG FIX: the original used atan(y / x). For points more than 90 degrees
     * apart, x < 0 and atan returns a NEGATIVE angle, giving a negative
     * distance. atan2(y, x) is quadrant-correct (y >= 0, so the result lies
     * in [0, pi]) and also handles x == 0 without dividing by zero. */
    double ad = atan2(y, x);
    d_dist[idx] = ad * _RAD;

    // Forward azimuth: quadrant handling kept exactly as in the original.
    x = (cl1 * sl2) - (sl1 * cl2 * cdelta);
    y = sdelta * cl2;
    double z;
    if (x == 0)
    {
        if (y > 0) z = -90;
        else if (y < 0) z = 90;
        else z = 0;   // also covers degenerate/NaN y (was uninitialized before)
    }
    else
    {
        z = atan(-y / x) * 180 / _PI;
        if (x < 0)
            z = z + 180;
    }

    // Normalize to [0, 360) degrees via a wrap into (-180, 180] and back.
    double z2 = z + 180.0f;
    while (z2 >= 360)
        z2 = z2 - 360;
    z2 = z2 - 180;
    z2 = -z2 * _PI / 180;
    double anglerad2 = z2 - ((2 * _PI) * floor(z2 / (2 * _PI)));
    d_azimut[idx] = anglerad2 * 180 / _PI;
}
3,729
#include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <math.h>   /* fabs */

/* Abort with file/line context if a CUDA runtime call fails. */
#define CHECK(call)\
{\
    const cudaError_t error = call;\
    if (error != cudaSuccess)\
    {\
        printf("Error: %s:%d, ", __FILE__, __LINE__);\
        printf("code: %d, reason: %s\n", error, cudaGetErrorString(error));\
        exit(1);\
    }\
}\

/* CPU reference: C[i] = A[i] + B[i] for i in [0, N). */
void sumArrayOnHost(float *A, float *B, float *C, const int N)
{
    for (int i = 0; i < N; i++)
        C[i] = A[i] + B[i];
}

/* One thread per element; the launch below uses exactly nElem threads,
 * so threadIdx.x alone indexes the arrays. */
__global__ void sumArrayOnDevice(float *A, float *B, float *C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

/* Compare host and device results element-wise within epsilon. */
void checkResult(float *host, float *device, const int N)
{
    double epsilon = 1.0E-8;
    int match = 1;
    for (int i = 0; i < N; i++)
    {
        /* BUG FIX: the original called abs(), which with only <stdio.h> and
         * <time.h> included resolves to the integer overload, truncating the
         * float difference to int -- any mismatch smaller than 1.0 was
         * silently reported as a match. Use fabs() on the double difference. */
        if (fabs(host[i] - device[i]) > epsilon)
        {
            match = 0;
            printf("Don't match!\n");
            printf("host %5.2f device %5.2f at current %d\n", host[i], device[i], i);
            break;
        }
    }
    if (match)
        printf("Array match\n\n");
    return;
}

/* Fill inp with pseudo-random floats in [0, 25.5]. */
void initData(float *inp, int size)
{
    time_t t;
    srand((unsigned) time(&t));
    for (int i = 0; i < size; i++)
        inp[i] = (float)(rand() & 0xFF) / 10.0f;
}

int main(int argc, char **argv)
{
    int dev = 0;
    CHECK(cudaSetDevice(dev));

    int nElem = 32;
    printf("Inp Size %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);

    float *h_A, *h_B, *host, *gpu;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    host = (float *)malloc(nBytes);
    gpu = (float *)malloc(nBytes);

    initData(h_A, nElem);
    initData(h_B, nElem);
    memset(host, 0, nBytes);
    memset(gpu, 0, nBytes);

    float *d_A, *d_B, *d_C;
    /* Use the CHECK macro (it was defined but never used in the original). */
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_B, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));

    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));

    dim3 block(nElem);
    dim3 grid(nElem / block.x);
    sumArrayOnDevice<<<grid, block>>>(d_A, d_B, d_C);
    CHECK(cudaGetLastError());   /* surface launch-configuration errors */
    printf("Execution Cfg <<<%d, %d>>>\n", grid.x, block.x);

    CHECK(cudaMemcpy(gpu, d_C, nBytes, cudaMemcpyDeviceToHost));

    sumArrayOnHost(h_A, h_B, host, nElem);
    checkResult(host, gpu, nElem);

    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    free(h_A);
    free(h_B);
    free(host);
    free(gpu);

    int c = getchar();
    return 0;
}
3,730
#include <stdlib.h>
#include <stdio.h>
#include <time.h>   /* clock(), clock_t (was pulled in transitively before) */
//#include <mpi.h>
#include <png.h>

#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)

/* One pixel, one int per channel (libpng hands us 8-bit channels). */
typedef struct {
    int r;
    int g;
    int b;
    int a;
} RGBA;

typedef struct {
    int size;                   /* width * height */
    int width, height;
    png_byte color_type;
    png_byte bit_depth;
    png_bytep *row_pointers;    /* libpng row buffers, RGBA8 after read */
    RGBA* pixels;
} PNG_DATA;

/* libpng RGBA8 bytes -> RGBA struct. */
void copy_pixels(RGBA *position, png_bytep px2)
{
    position->r = px2[0];
    position->g = px2[1];
    position->b = px2[2];
    position->a = px2[3];
}

/* RGBA struct -> libpng RGBA8 bytes (values assumed already in [0,255]). */
void copy_positions(RGBA position, png_bytep px2)
{
    px2[0] = position.r;
    px2[1] = position.g;
    px2[2] = position.b;
    px2[3] = position.a;
}

/* Flatten the libpng rows into a freshly malloc'd RGBA array of
 * dados->size entries. The CALLER owns (and must free) the buffer. */
RGBA* translate_px_to_vec(PNG_DATA* dados)
{
    int x, y;
    int height = dados->height;
    int width = dados->width;
    RGBA *pixels = (RGBA*)malloc(sizeof(RGBA) * dados->size);
    for (y = 0; y < height; y++)
    {
        png_bytep row = dados->row_pointers[y];
        for (x = 0; x < width; x++)
        {
            png_bytep px = &(row[x * 4]);
            copy_pixels(&(pixels[y * width + x]), px);
        }
    }
    return pixels;
}

/* Write an RGBA array back into the libpng row buffers. */
void translate_vec_to_px(RGBA* input, PNG_DATA* dados)
{
    int x, y;
    int height = dados->height;
    int width = dados->width;
    for (y = 0; y < height; y++)
    {
        png_bytep row = dados->row_pointers[y];
        for (x = 0; x < width; x++)
        {
            png_bytep px = &(row[x * 4]);
            copy_positions(input[y * width + x], px);
        }
    }
}

/*** PNG read/write ***/

/* Read any PNG and normalize it to 8-bit RGBA rows.
 * See http://www.libpng.org/pub/png/libpng-manual.txt */
PNG_DATA* read_png_file(char *filename)
{
    int y;
    PNG_DATA *novo = (PNG_DATA*)malloc(sizeof(PNG_DATA));

    FILE *fp = fopen(filename, "rb");
    if (!fp) abort();   /* consistent with write_png_file's handling */

    png_structp png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    png_infop info = png_create_info_struct(png);
    if (!png || !info) abort();
    if (setjmp(png_jmpbuf(png))) abort();

    png_init_io(png, fp);
    png_read_info(png, info);

    novo->width = png_get_image_width(png, info);
    novo->height = png_get_image_height(png, info);
    novo->size = novo->width * novo->height;
    novo->color_type = png_get_color_type(png, info);
    novo->bit_depth = png_get_bit_depth(png, info);

    /* Coerce every color_type / bit depth into 8-bit RGBA. */
    if (novo->bit_depth == 16)
        png_set_strip_16(png);
    if (novo->color_type == PNG_COLOR_TYPE_PALETTE)
        png_set_palette_to_rgb(png);
    /* PNG_COLOR_TYPE_GRAY_ALPHA is always 8 or 16bit depth. */
    if (novo->color_type == PNG_COLOR_TYPE_GRAY && novo->bit_depth < 8)
        png_set_expand_gray_1_2_4_to_8(png);
    if (png_get_valid(png, info, PNG_INFO_tRNS))
        png_set_tRNS_to_alpha(png);
    /* These color_type don't have an alpha channel, so fill it with 0xff. */
    if (novo->color_type == PNG_COLOR_TYPE_RGB ||
        novo->color_type == PNG_COLOR_TYPE_GRAY ||
        novo->color_type == PNG_COLOR_TYPE_PALETTE)
        png_set_filler(png, 0xFF, PNG_FILLER_AFTER);
    if (novo->color_type == PNG_COLOR_TYPE_GRAY ||
        novo->color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
        png_set_gray_to_rgb(png);
    png_read_update_info(png, info);

    novo->row_pointers = (png_bytep *)malloc(sizeof(png_bytep) * novo->height);
    for (y = 0; y < novo->height; y++)
    {
        novo->row_pointers[y] = (png_byte *)malloc(png_get_rowbytes(png, info));
    }
    png_read_image(png, novo->row_pointers);

    fclose(fp);
    return novo;
}

/* Write the row buffers out as an 8-bit RGBA PNG. */
void write_png_file(char *filename, PNG_DATA* dados)
{
    int width = dados->width;
    int height = dados->height;

    FILE *fp = fopen(filename, "wb");
    if (!fp) abort();

    png_structp png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (!png) abort();
    png_infop info = png_create_info_struct(png);
    if (!info) abort();
    if (setjmp(png_jmpbuf(png))) abort();

    png_init_io(png, fp);

    /* Output is 8bit depth, RGBA format. */
    png_set_IHDR(png, info, width, height, 8,
                 PNG_COLOR_TYPE_RGBA,
                 PNG_INTERLACE_NONE,
                 PNG_COMPRESSION_TYPE_DEFAULT,
                 PNG_FILTER_TYPE_DEFAULT);
    png_write_info(png, info);

    /* To drop the alpha channel for PNG_COLOR_TYPE_RGB output, use:
     * png_set_filler(png, 0, PNG_FILLER_AFTER); */
    png_write_image(png, dados->row_pointers);
    png_write_end(png, NULL);

    fclose(fp);
}

/*=============================================================================================*/

/* Clamp helper for 8-bit channel values. */
__device__ int clamp255(int v)
{
    return v > 255 ? 255 : v;
}

/* Each kernel takes the pixel count `n` and guards against overrun: the
 * original launched size/512 blocks (truncating), leaving the tail pixels
 * unprocessed; the fixed launch ceil-divides, so a guard is required. */

__global__ void grayscale(RGBA *pixels, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= n) return;
    int result = 0.21f * pixels[id].r + 0.72f * pixels[id].g + 0.07f * pixels[id].b;
    pixels[id].r = pixels[id].g = pixels[id].b = result;
}

__global__ void negative(RGBA *pixels, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= n) return;
    pixels[id].r = 255 - pixels[id].r;
    pixels[id].g = 255 - pixels[id].g;
    pixels[id].b = 255 - pixels[id].b;
}

__global__ void sepia(RGBA *pixels, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= n) return;
    /* BUG FIX: the original overwrote .r and then used the NEW r when
     * computing .g and .b (the classic sepia bug). Snapshot the original
     * channels first, and clamp to 255 so values survive the later
     * narrowing to png_byte instead of wrapping mod 256. */
    float r = pixels[id].r;
    float g = pixels[id].g;
    float b = pixels[id].b;
    pixels[id].r = clamp255((int)(r * 0.393f + g * 0.769f + b * 0.189f));
    pixels[id].g = clamp255((int)(r * 0.349f + g * 0.686f + b * 0.168f));
    pixels[id].b = clamp255((int)(r * 0.272f + g * 0.534f + b * 0.131f));
}

int size = 150 * 100;
int testSize = 1000;
int processar = 400;

int main(int argc, char *argv[])
{
    int cont = 0;
    int gpu1 = 0;
    int gpu2 = 1;
    RGBA *d1_buffer;
    RGBA *d2_buffer;
    clock_t start, end;
    size_t bytes = size * sizeof(RGBA);
    int nblocks = (size + 511) / 512;   /* ceil-div: cover ALL pixels */

    start = clock();
    char test[] = "150x100.png";
    PNG_DATA* imageData = read_png_file(test);

    cudaSetDevice(gpu1);
    cudaMalloc(&d1_buffer, bytes);
    cudaSetDevice(gpu2);
    cudaMalloc(&d2_buffer, bytes);

    while (cont < testSize)
    {
        RGBA* host_pixels = translate_px_to_vec(imageData);

        /* BUG FIX: the original handed this HOST malloc'd pointer to
         * cudaMemcpyPeer as if it were a gpu1 device pointer. Upload with a
         * plain host->device copy instead. */
        cudaSetDevice(gpu2);
        cudaMemcpy(d2_buffer, host_pixels, bytes, cudaMemcpyHostToDevice);

        /* Ping-pong the image between the two GPUs: sepia on gpu2,
         * negative on gpu1, `processar` kernels in total. */
        for (int i = 0; i < processar / 2; i++)
        {
            cudaSetDevice(gpu2);
            sepia<<<nblocks, 512>>>(d2_buffer, size);
            cudaMemcpyPeer(d1_buffer, gpu1, d2_buffer, gpu2, bytes);
            cudaDeviceSynchronize();

            cudaSetDevice(gpu1);
            negative<<<nblocks, 512>>>(d1_buffer, size);
            cudaMemcpyPeer(d2_buffer, gpu2, d1_buffer, gpu1, bytes);
            cudaDeviceSynchronize();
        }

        cudaSetDevice(gpu2);
        cudaMemcpy(host_pixels, d2_buffer, bytes, cudaMemcpyDeviceToHost);
        translate_vec_to_px(host_pixels, imageData);
        free(host_pixels);   /* BUG FIX: one buffer per iteration was leaked */
        cont++;

        char out[16];
        sprintf(out, "finalteste1.png");
        write_png_file(out, imageData);
    }

    cudaSetDevice(gpu1);
    cudaFree(d1_buffer);
    cudaSetDevice(gpu2);
    cudaFree(d2_buffer);

    end = clock();
    double duration = (double)(end - start) / CLOCKS_PER_SEC;
    printf("EXECUTION_TIME = %f\n", duration);
}
3,731
//pass
//--blockDim=64 --gridDim=64 --no-inline

// NOTE(review): this appears to be a GPUVerify test case -- the "//pass"
// header and the --blockDim/--gridDim/--no-inline flags read as harness
// directives, so the code is intentionally left byte-identical.
#include "cuda.h"

// Device helper: returns its argument plus one.
__device__ int f(int x)
{
    return x + 1;
}

// Kernel under verification: every thread computes f(2) into a local.
__global__ void foo()
{
    int y = f(2);
}
3,732
#include "includes.h"

// Privatized histogram: each block accumulates into a shared-memory copy of
// the bins (dynamic shared memory, num_bins * sizeof(unsigned int) passed as
// the third launch argument), then merges it into the global bins.
// Input values are assumed to lie in [0, num_bins).
__global__ void histogram_kernel(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins)
{
    extern __shared__ unsigned int bins_s[];   // block-private bins

    // Zero the shared copy cooperatively.
    for (unsigned int bin = threadIdx.x; bin < num_bins; bin += blockDim.x)
        bins_s[bin] = 0u;
    __syncthreads();

    // Grid-stride pass over the input, counting into shared bins.
    const unsigned int grid_span = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
         pos < num_elements;
         pos += grid_span)
        atomicAdd(&bins_s[input[pos]], 1u);
    __syncthreads();

    // Fold this block's private counts into the global histogram.
    for (unsigned int bin = threadIdx.x; bin < num_bins; bin += blockDim.x)
        atomicAdd(&bins[bin], bins_s[bin]);
}
3,733
#include <stdio.h>

// Empty kernel, used purely to exercise a launch.
__global__ void foo() {}

int main()
{
    foo<<<1, 1>>>();
    cudaDeviceSynchronize();

    // Report whatever the launch/sync produced (cudaSuccess prints "no error").
    cudaError_t status = cudaGetLastError();
    printf("CUDA error: %s\n", cudaGetErrorString(status));
    return 0;
}
3,734
#include "includes.h"
#include <bits/stdc++.h>
#include <cuda.h>

#define SIZE 60        // Global Size
#define BLOCK_SIZE 1024

using namespace std;

//::::::::::::::::::::::::::::::::::::::::::GPU::::::::::::::::::::::::::::::::
// Work-inefficient (Hillis-Steele) EXCLUSIVE prefix sum within one block:
// g_odata[i] = g_idata[0] + ... + g_idata[i-1], g_odata[0] = 0.
// Requires blockDim.x == BLOCK_SIZE (sdata is statically sized).
__global__ void kernel_prefix_sum_inefficient(double *g_idata, double *g_odata, int l)
{
    __shared__ double sdata[BLOCK_SIZE];

    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Exclusive scan: shift the input right by one; slot 0 gets the identity.
    if (i < l && tid != 0) {
        sdata[tid] = g_idata[i - 1];
    } else {
        sdata[tid] = 0;
    }
    __syncthreads();   // staging barrier (the original had none before tid 0's reads)

    /* BUG FIX: the original loop was `for (s = 1; s <= tid; s *= 2)` with a
     * __syncthreads() inside -- the trip count depends on tid, so threads hit
     * the barrier a different number of times (undefined behavior), and reads
     * of sdata[tid-s] raced with other threads' writes. All threads now run
     * the SAME number of iterations, with barriers separating the read phase
     * from the write phase of each step. */
    for (unsigned int s = 1; s < blockDim.x; s <<= 1) {
        double partner = (tid >= s) ? sdata[tid - s] : 0.0;
        __syncthreads();            // all reads done before any write
        if (tid >= s)
            sdata[tid] += partner;
        __syncthreads();            // all writes done before next read phase
    }

    // Guard the global store (the original wrote out of bounds for i >= l).
    if (i < l)
        g_odata[i] = sdata[tid];
}
3,735
#include <stdio.h>
#include <cuda.h>
#include <iostream>

using std::cout;
using std::endl;

// Square each of the first N elements of a, in place.
__global__ void square(float *a, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        a[idx] *= a[idx];
}

int main(void)
{
    const int N = 50;
    const size_t size = N * sizeof(float);

    float *h_a = (float *) malloc(size);
    float *d_a;
    cudaMalloc((void **) &d_a, size);

    // Host data: 0, 1, 2, ..., N-1.
    for (int i = 0; i < N; i++)
        h_a[i] = (float) i;
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);

    const int block_size = 4;
    const int n_blocks = (N + block_size - 1) / block_size;   // ceil-div
    square<<<n_blocks, block_size>>>(d_a, N);

    cudaMemcpy(h_a, d_a, size, cudaMemcpyDeviceToHost);

    // Verify against the exact integer squares (representable in float here).
    bool pass = true;
    for (int i = 0; i < N; i++)
        if (h_a[i] != (i * i))
            pass = false;

    cout << (pass ? "pass" : "fail") << endl;

    free(h_a);
    cudaFree(d_a);
}
3,736
#include "includes.h"

// No-op kernel; useful as a placeholder or to exercise a bare launch.
__global__ void dummy()
{
}
3,737
// reference: https://gist.github.com/dpiponi/1502434
// compile: nvcc ./main.cu -o cuda
// NOTES
// - sudo apt-get install nvidia-cuda-toolkit
// - must use .cu suffix to compile properly
// - must have nVidia GPU with CUDA capabilities

#define N 1000

#include <stdio.h>

// Double each input element: output[i] = input[i] * 2.
// Launched as <<<N, 1>>> -- one (single-thread) block per element, so
// blockIdx.x is the element index. (Inefficient, but kept for parity with
// the reference gist.)
__global__ void fn( int *input, int *output )
{
    int i = blockIdx.x;
    if ( i < N )
    {
        output[i] = input[i] * 2;
    }
}

int main()
{
    int hostInput[N], hostOutput[N];
    int *deviceInput, *deviceOutput;
    cudaError_t err = cudaSuccess;

    // allocate arrays on GPU,
    // cuda takes ownership of device pointers
    err = cudaMalloc( (void **) &deviceInput, sizeof(int)*N );
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "could not allocate on GPU\n" );
    }
    err = cudaMalloc( (void **) &deviceOutput, sizeof(int)*N );
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "could not allocate on GPU\n" );
    }

    // set input values
    for ( int i = 0; i < N; i++ )
        hostInput[i] = i;

    // copy input to GPU
    err = cudaMemcpy( deviceInput, hostInput, sizeof(int)*N, cudaMemcpyHostToDevice );
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "could not copy from hostInput to deviceInput\n" );
    }

    // run GPU code on N threads- one per element
    fn<<<N, 1>>>( deviceInput, deviceOutput );
    // IMPROVEMENT: the launch itself was the only unchecked step in this
    // file -- launch-configuration errors are only visible via
    // cudaGetLastError(), never via the copy calls around it.
    err = cudaGetLastError();
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "kernel launch failed: %s\n", cudaGetErrorString(err) );
    }

    // copy output from GPU to CPU
    err = cudaMemcpy( hostOutput, deviceOutput, sizeof(int)*N, cudaMemcpyDeviceToHost );
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "could not copy from deviceOutput to hostOutput\n" );
    }

    for ( int i = 0; i < N; i++ )
    {
        printf( "%d\n", hostOutput[i] );
    }

    err = cudaFree( deviceInput );
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "could not free deviceInput from GPU: %s\n", cudaGetErrorString(err) );
    }
    err = cudaFree( deviceOutput );
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "could not free deviceOutput from GPU: %s\n", cudaGetErrorString(err) );
    }

    return 0;
}
3,738
#include "includes.h"

// Fill the Cartesian coordinates of each (ring i, sector j) cell on a polar
// grid: abscissa = Rmed[i] * cos(theta_j), ordinate = Rmed[i] * sin(theta_j),
// with theta_j = 2*pi*j/nsec. Output arrays are row-major [nrad][nsec].
// Expected launch: 2D grid where x covers the nsec sectors and y covers the
// nrad rings; the guard handles partial edge blocks.
// NOTE(review): PI is not defined in this file -- presumably a double
// constant supplied by includes.h; confirm.
__global__ void InitComputeAccelKernel (double *CellAbscissa, double *CellOrdinate, double *Rmed, int nsec, int nrad)
{
  int j = threadIdx.x + blockDim.x*blockIdx.x;   // sector (azimuthal) index
  int i = threadIdx.y + blockDim.y*blockIdx.y;   // ring (radial) index
  if (i<nrad && j<nsec){
    CellAbscissa[i*nsec+j] = Rmed[i] * cos((2.0*PI*(double)j)/(double)nsec);
    CellOrdinate[i*nsec+j] = Rmed[i] * sin((2.0*PI*(double)j)/(double)nsec);
  }
}
3,739
#include "includes.h"

#define FALSE 0
#define TRUE !FALSE
#define NUMTHREADS 16
#define THREADWORK 32

// For every (A-vector, B-vector) pair, compute the two sample standard
// deviations over the dimensions where NEITHER vector is NaN, using
// precomputed pairwise means and pair counts.
//
// Layout: vectsA is na vectors of length dim (row-major), vectsB likewise;
// means[(bx*nb+by)*2 + {0,1}] holds the pairwise means of A and B;
// numPairs[bx*nb+by] holds n, the number of non-NaN dimensions for the pair;
// sds mirrors the means layout and receives sqrt(sum_sq / (n - 1)).
//
// Launch contract: one block per (bx, by) pair on a 2D grid, with
// blockDim.x == NUMTHREADS (the shared partial-sum arrays and the reduction
// stride both hard-code it).
__global__ void gpuSD(const float * vectsA, size_t na, const float * vectsB, size_t nb, size_t dim, const float * means, const float * numPairs, float * sds)
{
    size_t offset, stride, tx = threadIdx.x, bx = blockIdx.x, by = blockIdx.y;
    float a, b, termA, termB;
    __shared__ float meanA, meanB, n, threadSumsA[NUMTHREADS], threadSumsB[NUMTHREADS];

    // Safe before __syncthreads(): bx/by are uniform across the block, so the
    // whole block returns together (no divergent barrier).
    if((bx >= na) || (by >= nb)) return;

    // Thread 0 stages this pair's means and count into shared memory.
    if(tx == 0) {
        meanA = means[bx*nb*2+by*2];
        meanB = means[bx*nb*2+by*2+1];
        n = numPairs[bx*nb+by];
    }
    __syncthreads();

    // Phase 1: each thread accumulates squared deviations over a strided
    // slice of the dimensions, skipping any dimension where either value
    // is NaN (n is presumably the matching non-NaN count -- see numPairs).
    threadSumsA[tx] = 0.f;
    threadSumsB[tx] = 0.f;
    for(offset = tx; offset < dim; offset += NUMTHREADS) {
        a = vectsA[bx * dim + offset];
        b = vectsB[by * dim + offset];
        if(!(isnan(a) || isnan(b))) {
            termA = a - meanA;
            termB = b - meanB;
            threadSumsA[tx] += termA * termA;
            threadSumsB[tx] += termB * termB;
        }
    }
    __syncthreads();

    // Phase 2: tree reduction of the per-thread partials (barrier is outside
    // the divergent `if`, so every thread reaches it each iteration).
    for(stride = NUMTHREADS >> 1; stride > 0; stride >>= 1) {
        if(tx < stride) {
            threadSumsA[tx] += threadSumsA[tx + stride];
            threadSumsB[tx] += threadSumsB[tx + stride];
        }
        __syncthreads();
    }

    // Thread 0 writes the two sample standard deviations (n-1 denominator).
    if(tx == 0) {
        sds[bx*nb*2+by*2] = sqrtf(threadSumsA[0] / (n - 1.f));
        sds[bx*nb*2+by*2+1] = sqrtf(threadSumsB[0] / (n - 1.f));
    }
}
3,740
// Copy u into uo over the interior cells (row >= 1, col >= 1) of an
// N-column row-major grid. One thread per cell; x indexes rows, y columns.
__global__ void update_uo_multi_kernel0(double * d0_u, double * d0_uo, int N)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int col = blockIdx.y * blockDim.y + threadIdx.y + 1;
    const int cell = row * N + col;
    d0_uo[cell] = d0_u[cell];
}

// Same copy, but rows start at 0 while columns still start at 1
// (e.g. this device's subdomain includes the top boundary row).
__global__ void update_uo_multi_kernel1(double * d1_u, double * d1_uo, int N)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y + 1;
    const int cell = row * N + col;
    d1_uo[cell] = d1_u[cell];
}
3,741
#include "includes.h"

// Rotate the first numThreads elements of c left by one position:
// new c[i] = old c[(i+1) mod numThreads].
// Launch contract: a SINGLE block with blockDim.x == numThreads
// (__syncthreads() only orders threads within one block, and every
// thread handles exactly one element).
__global__ void rotateArray(int *c, int numThreads)
{
    int nextIndex = (threadIdx.x + 1)%numThreads;
    int val = c[nextIndex];      // read the OLD neighbor value first
    __syncthreads();             // all reads complete before any write below
    c[threadIdx.x] = val;
}
3,742
#include "includes.h"

// Channel-wise ReLU + L1 normalization: for every spatial position of every
// batch item, the positive channel values are divided by their sum (plus a
// small eps); non-positive values become 0. Layout is [batch][channels][wh_step]
// and `size` = batch * wh_step (one thread per spatial position per batch item).
__global__ void activate_array_normalize_channels_kernel(float *x, int size, int batch, int channels, int wh_step, float *output_gpu)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size) return;

    const int spatial = tid % wh_step;   // position within one channel plane
    const int item = tid / wh_step;      // batch index
    const int base = spatial + item * wh_step * channels;

    // Sum of positive channel values, eps keeps the divisor nonzero.
    const float eps = 0.0001;
    float sum = eps;
    for (int c = 0; c < channels; ++c) {
        const float val = x[base + c * wh_step];
        if (val > 0) sum += val;
    }

    // Emit val/sum for positive entries, 0 otherwise.
    for (int c = 0; c < channels; ++c) {
        const float val = x[base + c * wh_step];
        output_gpu[base + c * wh_step] = (val > 0) ? val / sum : 0;
    }
}
3,743
// NOTE: a large block of commented-out "addWithCuda" SDK sample code that
// preceded this program was removed; it was dead text.
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

// Aborts with file/line context when a CUDA runtime call fails.
// `error` is the cudaError_t returned by the call (cudaSuccess == 0).
// FIX: parameters are const char* — string literals were previously bound to
// plain char*, which is ill-formed in modern C++.
void cudasafe(int error, const char* message, const char* file, int line) {
	if (error != cudaSuccess) {
		fprintf(stderr, "CUDA Error: %s : %i. In %s line %d\n", message, error, file, line);
		exit(-1);
	}
}

// Enumerates the CUDA devices on this machine and prints their key
// capability/capacity properties.
int main(int argc, char ** argv) {
	int deviceCount;
	cudasafe(cudaGetDeviceCount(&deviceCount), "GetDeviceCount", __FILE__, __LINE__);
	printf("Number of CUDA devices %d.\n", deviceCount);

	for (int dev = 0; dev < deviceCount; dev++) {
		cudaDeviceProp deviceProp;
		cudasafe(cudaGetDeviceProperties(&deviceProp, dev), "Get Device Properties", __FILE__, __LINE__);

		if (dev == 0) {
			// major/minor == 9999 is the documented sentinel for "no real device".
			if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
				printf("No CUDA GPU has been detected\n");
				return -1;
			} else if (deviceCount == 1) {
				printf("There is 1 device supporting CUDA\n");
			} else {
				printf("There are %d devices supporting CUDA\n", deviceCount);
			}
		}

		printf("For device #%d\n", dev);
		printf("Device name: %s\n", deviceProp.name);
		printf("Major revision number: %d\n", deviceProp.major);
		printf("Minor revision Number: %d\n", deviceProp.minor);
		// FIX: totalGlobalMem, sharedMemPerBlock and totalConstMem are size_t;
		// printing them with %d truncates/garbles the values on 64-bit hosts.
		printf("Total Global Memory: %zu\n", (size_t)deviceProp.totalGlobalMem);
		printf("Total shared mem per block: %zu\n", (size_t)deviceProp.sharedMemPerBlock);
		printf("Total const mem size: %zu\n", (size_t)deviceProp.totalConstMem);
		printf("Warp size: %d\n", deviceProp.warpSize);
		printf("Maximum block dimensions: %d x %d x %d\n", deviceProp.maxThreadsDim[0],
			deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
		printf("Maximum grid dimensions: %d x %d x %d\n", deviceProp.maxGridSize[0],
			deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
		printf("Clock Rate: %d\n", deviceProp.clockRate);
		printf("Number of muliprocessors: %d\n", deviceProp.multiProcessorCount);
	}
	return 0;
}
3,744
#include<stdio.h>
#include<stdlib.h>

#define N 512

/* Element-wise sum: c[i] = a[i] + b[i] for i in [0, N). */
void host_add(int *a, int *b, int *c) {
	int i = 0;
	while (i < N) {
		c[i] = a[i] + b[i];
		i++;
	}
}

/* Fills the array so that data[i] == i. */
void fill_array(int *data) {
	int i = 0;
	while (i < N) {
		data[i] = i;
		i++;
	}
}

/* Prints every addition, one "a + b = c" per line. */
void print_output(int *a, int *b, int*c) {
	for (int i = 0; i < N; i++)
		printf("\n %d + %d = %d", a[i], b[i], c[i]);
}

/* CPU-only vector addition demo. */
int main(void) {
	const int bytes = N * sizeof(int);

	/* Host buffers for the two inputs and the result. */
	int *a = (int *)malloc(bytes);
	int *b = (int *)malloc(bytes);
	int *c = (int *)malloc(bytes);

	fill_array(a);
	fill_array(b);
	host_add(a, b, c);
	print_output(a, b, c);

	free(a);
	free(b);
	free(c);
	return 0;
}
3,745
// Tiled matrix transpose: output (n x m) = transpose of input (m x n, row-major).
// Expects 32x32 thread blocks. The shared tile is padded to 33 columns so the
// transposed (column-wise) reads fall in 32 different banks instead of one,
// avoiding shared-memory bank conflicts.
__global__ void transposeOptimized(float *input, float *output, int m, int n){
	__shared__ float sdata[32][33];

	int colID_input = threadIdx.x + blockDim.x*blockIdx.x;
	int rowID_input = threadIdx.y + blockDim.y*blockIdx.y;

	// Stage one tile of the input (coalesced reads).
	if (rowID_input < m && colID_input < n) {
		int index_input = colID_input + rowID_input*n;
		sdata[threadIdx.y][threadIdx.x] = input[index_input];
	}

	// FIX: the barrier must be reached by EVERY thread of the block, so it
	// cannot sit inside the boundary guard as before — in an edge block the
	// out-of-range threads would have skipped it (undefined behavior).
	__syncthreads();

	// Write the transposed tile (coalesced writes). Each thread writes a
	// different element than it staged, so the write needs its own bounds
	// check — FIX: previously the input-side guard was reused, so an edge
	// thread could store an unfilled tile slot past the end of `output`.
	int dst_col = threadIdx.x + blockIdx.y * blockDim.y;
	int dst_row = threadIdx.y + blockIdx.x * blockDim.x;
	if (dst_row < n && dst_col < m)
		output[dst_col + dst_row*m] = sdata[threadIdx.x][threadIdx.y];
}
3,746
#include "includes.h"

// Block-wide sum reduction with a factor-2 unroll: each block first folds the
// elements of its neighbouring block into its own span of `input`, then
// performs an interleaved in-place tree reduction and writes the block total
// to temp[blockIdx.x]. `input` is modified in place.
__global__ void reduction_interleaved_unrolling_blocks2_1(int * input, int * temp, int size)
{
	const int local = threadIdx.x;
	const int base = blockDim.x * blockIdx.x * 2;   // first element owned by this block
	const int gidx = base + local;                  // this thread's global slot
	int *block_data = input + base;                 // this block's working window

	// Unrolling step: fold the partner element one blockDim.x away.
	if (gidx + blockDim.x < size)
		input[gidx] += input[gidx + blockDim.x];
	__syncthreads();

	// Interleaved tree reduction over the block's window.
	for (int span = blockDim.x / 2; span > 0; span /= 2)
	{
		if (local < span)
			block_data[local] += block_data[local + span];
		__syncthreads();
	}

	// Thread 0 publishes the per-block partial sum.
	if (local == 0)
		temp[blockIdx.x] = block_data[0];
}
3,747
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>

using namespace std;

// Copies the freshly computed result grid d_r back into the input grid d_e so
// the next stencil iteration reads the updated values. Launched on the same
// 2-D grid as the stencil kernel; guarded against threads outside the X x Y
// domain.
__global__ void _copy_dr_to_de(int *d_e,int *d_r,int X,int Y){
	int x,y;
	x = threadIdx.x + (blockIdx.x*blockDim.x);
	y = threadIdx.y + (blockIdx.y*blockDim.y);
	int h_r_i = x + ( y * (X) );
	if(x<X && y<Y)
		d_e[h_r_i] = d_r[h_r_i];
}

// 2-D cross (star) stencil of half-width k/2: each cell accumulates its
// weighted neighbours along +x, -x, +y and -y (weight c_coeff[lk] at distance
// lk) plus the centre cell weighted by c_coeff[0], writing into d_r.
// NOTE(review): `temp` is declared int, so every `temp += d_e[...]*c_coeff[lk]`
// truncates the float product toward zero — confirm integer output is intended.
// NOTE(review): unlike _copy_dr_to_de there is no `x<X && y<Y` guard here, so
// any thread outside the domain (grid rounded up by ceil(), or X,Y below the
// 32x32 block size) reads and writes out of bounds.
__global__ void _2Dstencil_global(int *d_e,int *d_r,float *c_coeff,int X,int Y,int k){
	int x,y,h_e_i,h_r_i,temp;
	x = threadIdx.x + (blockIdx.x*blockDim.x);
	y = threadIdx.y + (blockIdx.y*blockDim.y);
	h_r_i = x + ( y * (X) );
	temp = d_e[h_r_i]*c_coeff[0];
	// Walk outwards along the four axis directions, clamping at the borders.
	for(int lk = 1;lk<(k/2)+1;lk++)
	{
		if(x+lk < X)
		{
			h_e_i = (x+lk) + ( (y) * (X) );
			temp += d_e[h_e_i]*c_coeff[lk];
		}
		if(x-lk >= 0)
		{
			h_e_i = (x-lk) + ( (y) * (X) );
			temp += d_e[h_e_i]*c_coeff[lk];
		}
		if(y+lk < Y)
		{
			h_e_i = (x) + ( (y+lk) * (X) );
			temp += d_e[h_e_i]*c_coeff[lk];
		}
		if(y-lk >= 0)
		{
			h_e_i = (x) + ( (y-lk) * (X) );
			temp += d_e[h_e_i]*c_coeff[lk];
		}
	}
	d_r[h_r_i] = temp;
}

// Driver: reads an X x Y integer grid from "entrada.txt", applies the stencil
// for `times` iterations (copying d_r back to d_e between iterations), times
// the kernels with CUDA events and writes the result to "resultado.txt".
// Usage: prog [X] [k] [times]   (Y is forced equal to X).
int main(int argc, char* argv[])
{
	int *h_e,*h_r;
	int *d_e, *d_r;
	int size,tam;
	int X=32;
	int Y=32;
	int k=4;          // stencil diameter; half-width actually used is k/2
	int times = 1;    // number of stencil iterations
	int BX=32;
	int BY=32;
	int GX=1;
	int GY=1;
	float *c_coeff,*d_c_coeff;

	if(argc > 1)
	{
		X = atoi(argv[1]);
		Y = X;
	}
	if(argc > 2)
	{
		k = atoi(argv[2]);
	}
	if(argc > 3)
	{
		times = atoi(argv[3]);
	}

	// Fixed 32x32 blocks; the grid is rounded up to cover the whole domain.
	if(X>32)
	{
		GX = ceil((float)X/(float)32);
		BX = 32;
	}
	if(Y>32)
	{
		GY = ceil((float)Y/(float)32);
		BY = 32;
	}

	dim3 block_dim(BX,BY,1);
	dim3 grid_dim(GX,GY,1);

	size = X * Y * sizeof(int);
	tam = X * Y;
	h_e = (int*) malloc(size);
	h_r = (int*) malloc(size);
	c_coeff = (float*)malloc((k/2+1)*sizeof(float));
	cudaMalloc(&d_e, size);
	cudaMalloc(&d_r, size);
	cudaMalloc(&d_c_coeff,(k/2+1)*sizeof(float));

	printf("\n coefs \n");
	// Linearly decaying weights: c_coeff[0] = 1.0 down to c_coeff[k/2].
	for(int i=0;i<(k/2+1);i++)
	{
		c_coeff[i]=(float)((k/2+1)-i)/(float)(k/2+1);
	}
	for(int i=0;i<(k/2+1);i++)
	{
		printf(" %f",c_coeff[i]);
	}
	printf("\n coefs \n");

	// Element (i,j) of the input grid is stored at h_e[i + j*X].
	FILE *arq;
	arq = fopen("entrada.txt", "rt");
	for(int i=0;i<X;i++)
		for(int j=0;j<Y;j++)
			fscanf(arq," %d",&h_e[i+j*X]);
	fclose(arq);

	/* Copy vectors from host memory to device memory */
	cudaMemcpy(d_e, h_e, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_c_coeff, c_coeff, (k/2+1)*sizeof(float), cudaMemcpyHostToDevice);

	cudaEvent_t start, stop;
	cudaEventCreate (&start);
	cudaEventCreate (&stop);
	cudaEventRecord (start, 0);

	/******************
	*** Kernel Call ***
	*******************/
	//_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);

	for(int t=0;t<times;t++)
	{
		_2Dstencil_global<<<grid_dim,block_dim>>>(d_e,d_r,d_c_coeff,X,Y,k);
		_copy_dr_to_de<<<grid_dim,block_dim>>>(d_e,d_r,X,Y);
	}

	cudaError_t err = cudaSuccess;
	err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
	}

	/******************
	*** Kernel Call ***
	*******************/

	cudaDeviceSynchronize();
	cudaEventRecord (stop, 0);
	cudaEventSynchronize (stop);

	float elapsedTime;
	cudaEventElapsedTime (&elapsedTime, start, stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);

	printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
	printf ("[%d,%.5f],\n", tam,elapsedTime);

	cudaMemcpy(h_r, d_r, size, cudaMemcpyDeviceToHost);

	arq = fopen("resultado.txt", "wt");
	for(int i=0;i<X;i++)
	{
		for(int j=0;j<Y;j++)
		{
			fprintf(arq," %d",h_r[i+j*X]);
		}
		fprintf(arq,"\n");
	}
	fclose(arq);

	cudaFree(d_e);
	cudaFree(d_r);
	cudaFree(d_c_coeff);
	std::free(h_e);
	std::free(h_r);
	std::free(c_coeff);

	return 0;
} /* main */
3,748
#include <bits/stdc++.h>
#include <cuda.h>
using namespace std;

#define N ((int)1e3)
#define TILE 32
#define CEIL(a, b) ((a-1)/b +1)   // ceiling division for positive a, b

// Tiled matrix multiply: C = A * B for N x N row-major matrices.
// Launch with a (CEIL(N,TILE) x CEIL(N,TILE)) grid of TILE x TILE blocks;
// each block stages one tile of A and one of B in shared memory per step.
__global__ void multiply(float *d_a, float *d_b, float *d_c)
{
	int x = blockIdx.x*blockDim.x + threadIdx.x;   // row of C
	int y = blockIdx.y*blockDim.y + threadIdx.y;   // column of C

	__shared__ float a[TILE][TILE];
	__shared__ float b[TILE][TILE];

	float cij = 0.0;
	for(int k=0; k<CEIL(N, TILE); k++)
	{
		// Copy the kth tile on the horizontal strip from A
		// (zero-padding past the matrix edge so the dot product is unaffected).
		if(x<N && TILE*k + threadIdx.y < N)
			a[threadIdx.x][threadIdx.y] = d_a[x*N + TILE*k + threadIdx.y];
		else
			a[threadIdx.x][threadIdx.y] = 0.0;

		// Copy the kth tile on the vertical strip from B
		if(y<N && TILE*k + threadIdx.x < N)
			b[threadIdx.x][threadIdx.y] = d_b[(TILE*k + threadIdx.x)*N + y];
		else
			b[threadIdx.x][threadIdx.y] = 0.0;

		// Wait for both tiles to be fully staged.
		__syncthreads();

		// Accumulate the partial dot product contributed by this tile pair.
		for(int kk=0; kk<TILE; kk++)
			cij += a[threadIdx.x][kk] * b[kk][threadIdx.y];

		// Wait before any thread overwrites the tiles with the next pair.
		__syncthreads();
	}
	if(x < N && y < N)
		d_c[x*N + y] = cij;
}

// Verifies h_c == h_a * h_b on the CPU, tolerating `precision` relative error.
// Prints up to 10 mismatches and a pass/fail summary.
template <class T>
void testSolution(T *h_a, T *h_b, T *h_c, float precision=0.0)
{
	int errors = 0;
	for(int i=0; i<N; i++)
		for(int j=0; j<N; j++)
		{
			float exp = 0.0, act;
			for(int k=0; k<N; k++)
				exp += h_a[i*N + k] * h_b[k*N + j];
			act = h_c[i*N + j];
			if(abs(act-exp) / (max(exp, precision)) > precision)
			{
				errors++;
				if(errors <= 10)
					printf("Test failed at index : (%d, %d) [Expected: %10.2f | Got: %10.2f]\n", i, j, exp, act);
			}
		}
	if(errors)
		printf("\n%d Tests failed!\n\n", errors);
	else
		printf("All tests passed !\n\n");
}

int main()
{
	float *h_a, *h_b, *h_c;
	float *d_a, *d_b, *d_c;

	// Allocate host memory
	h_a = new float[N*N];
	h_b = new float[N*N];
	h_c = new float[N*N];

	printf("\nSize of matrices : %d x %d\n\n", N, N);

	// Fill A and B with random positive rationals.
	srand(time(0));
	for(int i=0; i<N; i++)
		for(int j=0; j<N; j++)
		{
			h_a[i*N + j] = (rand()%N) *1.0/ (rand()%N +1);
			h_b[i*N + j] = (rand()%N) *1.0/ (rand()%N +1);
		}

	// Allocate memory on device
	cudaMalloc((void**)&d_a, N*N*sizeof(float));
	cudaMalloc((void**)&d_b, N*N*sizeof(float));
	cudaMalloc((void**)&d_c, N*N*sizeof(float));

	// Copy data into device memory
	cudaMemcpy(d_a, h_a, N*N*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, N*N*sizeof(float), cudaMemcpyHostToDevice);

	// Kernel call
	dim3 grid(CEIL(N, 32), CEIL(N, 32), 1);
	dim3 block(32, 32, 1);
	multiply <<<grid, block>>> (d_a, d_b, d_c);
	// FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
	// the supported equivalent.
	cudaDeviceSynchronize();

	// Copy data back to host
	cudaMemcpy(h_c, d_c, N*N*sizeof(float), cudaMemcpyDeviceToHost);

	// Free device memory
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);

	testSolution(h_a, h_b, h_c, 1e-3); // Tolerates 0.1% relative error

	// FIX: `delete[] h_a, h_b, h_c;` used the comma operator and only freed
	// h_a — h_b and h_c leaked. Each array needs its own delete[].
	delete[] h_a;
	delete[] h_b;
	delete[] h_c;
	return 0;
}
3,749
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <sys/time.h>

// random(a,b): integer in [a, b); index(i,j,col): row-major offset of (i,j).
#define random(a, b) (rand() % (b - a) + a)
#define index(i, j, col) (((i) * (col)) + (j))

void PrintMatrix(float *A, int row, int col);
void FillMatrix(float *matrix, int row, int col, int padding);

// im2col: for each output position (i,j), copies the filter_height x
// filter_width input patch it covers into row index(i,j,result_width) of
// matrix_flatten, at the column range owned by `channel_id`. One thread per
// output pixel.
// NOTE(review): the trailing __syncthreads() serves no purpose (no shared
// memory is used) and sits after a divergent `if`; a barrier not reached by
// all threads of a block is undefined on pre-Volta hardware.
__global__ void im2col(float *matrix, int channel_id, int channel, float *matrix_flatten, int height_stride, int width_stride, int matrix_height, int matrix_width, int filter_height, int filter_width, int result_height, int result_width)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j = blockIdx.y * blockDim.y + threadIdx.y;
	// Flatten the patch.
	if (i < result_height && j < result_width)
		for (int x = 0; x < filter_height; x++)
			for (int y = 0; y < filter_width; y++)
				matrix_flatten[index(index(i, j, result_width), index(x, y, filter_width) + channel_id * filter_height * filter_width, channel * filter_height * filter_width)] = matrix[index(i * height_stride + x, j * width_stride + y, matrix_width)];
	__syncthreads();
}

// Naive GEMM: C(m x k) = A(m x n) * B(n x k), one thread per element of C.
__global__ void MatrixMulCUDA(float *A, float *B, float *C, int m, int n, int k)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j = blockIdx.y * blockDim.y + threadIdx.y;
	float sum = 0;
	if (i < m && j < k)
	{
		for (int x = 0; x < n; x++)
			sum += A[i * n + x] * B[x * k + j];
		C[i * k + j] = sum;
	}
}

// Convolves a 3-channel size x size image with a fixed 3x3 filter per channel
// via im2col + GEMM. Usage: prog <size> <stride> <blockX> <blockY>.
int main(int argc, char **argv)
{
	if (argc != 5)
	{
		printf("Wrong Input!\n");
		return 1;
	}
	int size = atoi(argv[1]);
	int stride = atoi(argv[2]);
	int x = atoi(argv[3]);
	int y = atoi(argv[4]);
	dim3 threadsPerBlock(x, y);
	int channel = 3;
	float *matrix[channel];
	float *filter[channel];
	float *result;
	int matrix_height = size;
	int matrix_width = size;
	int filter_height = 3;
	int filter_width = 3;
	// Padding chosen so the strided filter placement tiles the input exactly.
	int padding = ((((matrix_height - filter_height) / stride + 1) * stride - (matrix_height - filter_height)) % stride) / 2;
	int matrix_size = sizeof(float) * (matrix_height + 2 * padding) * (matrix_width + 2 * padding);
	int result_size = sizeof(float) * ((matrix_height - filter_height + 2 * padding) / stride + 1) * ((matrix_width - filter_width + 2 * padding) / stride + 1);
	int filter_size = sizeof(float) * filter_height * filter_width;
	for (int i = 0; i < channel; i++)
	{
		matrix[i] = (float *)malloc(matrix_size);
		// NOTE(review): sizeof(matrix[i]) is the size of a POINTER (4/8
		// bytes), not of the buffer — only the first few bytes are zeroed, so
		// the padding border stays uninitialized. Should be
		// memset(matrix[i], 0, matrix_size).
		memset(matrix[i], 0, sizeof(matrix[i]));
		FillMatrix(matrix[i], matrix_height, matrix_width, padding);
	}
	// Each channel's filter is simply the values 1..9.
	for (int i = 0; i < channel; i++)
	{
		filter[i] = (float *)malloc(filter_size);
		for (int j = 0; j < filter_height * filter_width; j++)
			filter[i][j] = j + 1;
	}
	result = (float *)malloc(result_size);
	timeval t1, t2;
	gettimeofday(&t1, NULL);
	float *cuda_matrix[channel];
	float *cuda_filter[channel];
	float *cuda_result;
	for (int i = 0; i < channel; i++)
	{
		cudaMalloc(&cuda_matrix[i], matrix_size);
		cudaMemcpy(cuda_matrix[i], matrix[i], matrix_size, cudaMemcpyHostToDevice);
	}
	for (int i = 0; i < channel; i++)
	{
		cudaMalloc(&cuda_filter[i], filter_size);
		cudaMemcpy(cuda_filter[i], filter[i], filter_size, cudaMemcpyHostToDevice);
	}
	cudaMalloc(&cuda_result, result_size);
	float *matrix_flatten;
	cudaMalloc(&matrix_flatten, result_size * filter_height * filter_width * channel);
	int result_height = (matrix_height - filter_height + 2 * padding) / stride + 1;
	int result_width = (matrix_width - filter_width + 2 * padding) / stride + 1;
	// Ceil-divide the output grid by the block shape.
	dim3 numBlocks((result_height % threadsPerBlock.x) ? result_height / threadsPerBlock.x + 1 : result_height / threadsPerBlock.x, (result_width % threadsPerBlock.y) ? result_width / threadsPerBlock.y + 1 : result_width / threadsPerBlock.y);
	for (int i = 0; i < channel; i++)
		im2col<<<numBlocks, threadsPerBlock>>>(cuda_matrix[i], i, channel, matrix_flatten, stride, stride, matrix_height + 2 * padding, matrix_width + 2 * padding, filter_height, filter_width, result_height, result_width);
	// All channels' filters concatenated into one (filter_h*filter_w*channel) x 1 column.
	float *filter_flatten;
	cudaMalloc(&filter_flatten, filter_size * channel);
	for (int i = 0; i < channel; i++)
		cudaMemcpy(filter_flatten + i * (filter_height * filter_width), filter[i], filter_size, cudaMemcpyHostToDevice);
	dim3 numBlocks1((result_height * result_width % threadsPerBlock.x) ? result_height * result_width / threadsPerBlock.x + 1 : result_height * result_width / threadsPerBlock.x, (filter_height * filter_width % threadsPerBlock.y) ? filter_height * filter_width / threadsPerBlock.y + 1 : filter_height * filter_width / threadsPerBlock.y);
	MatrixMulCUDA<<<numBlocks1, threadsPerBlock>>>(matrix_flatten, filter_flatten, cuda_result, result_height * result_width, filter_height * filter_width * channel, 1);
	// NOTE(review): kernel launches are asynchronous and nothing synchronizes
	// before t2 is taken, so this measures launch overhead only; the blocking
	// cudaMemcpy below is what actually waits for the GPU to finish.
	gettimeofday(&t2, NULL);
	printf("Matrix Size:%d\tStride:%d\n", size, stride);
	printf("Calculation time:%ldms\n", t2.tv_sec * 1000 + t2.tv_usec/1000 - t1.tv_sec * 1000 - t1.tv_usec/1000);
	cudaMemcpy(result, cuda_result, result_size, cudaMemcpyDeviceToHost);
	// for (int i = 0; i < channel; i++)
	// {
	//     printf("Matrix after padding of channel %d:\n",i);
	//     PrintMatrix(matrix[i], matrix_height + 2 * padding, matrix_width + 2 * padding);
	// }
	// for (int i = 0; i < channel; i++)
	// {
	//     printf("Filter of channel %d:\n",i);
	//     PrintMatrix(filter[i], filter_height, filter_width);
	// }
	// printf("Result:\n");
	// PrintMatrix(result, ((matrix_height - filter_height + 2 * padding) / stride + 1), ((matrix_width - filter_width + 2 * padding) / stride + 1));
	for (int i = 0; i < channel; i++)
		cudaFree(cuda_matrix[i]);
	for (int i = 0; i < channel; i++)
		cudaFree(cuda_filter[i]);
	cudaFree(cuda_result);
	for (int i = 0; i < channel; i++)
		free(matrix[i]);
	for (int i = 0; i < channel; i++)
		free(filter[i]);
	free(result);
}

// Fills the interior (non-padding) region of a padded row-major matrix with
// random digits in [0, 9).
void FillMatrix(float *matrix, int row, int col, int padding)
{
	for (int i = padding; i < row + padding; i++)
		for (int j = padding; j < col + padding; j++)
			matrix[index(i, j, col + 2 * padding)] = random(0, 9);
}

// Prints a row x col row-major matrix, one row per line.
void PrintMatrix(float *A, int row, int col)
{
	for (int i = 0; i < row; ++i)
	{
		for (int j = 0; j < col; ++j)
			printf("%f ", A[i * col + j]);
		printf("\n");
	}
}
3,750
#include <stdio.h>

// For each thread i of a single 10-thread block, computes the running average
// of d_a[0..i] via a block-shared staging buffer and writes it back to d_a[i].
__global__ void gpu_shared_memory(float *d_a) {
	const int tid = threadIdx.x;
	__shared__ float staging[10];

	// Stage the input in shared memory.
	staging[tid] = d_a[tid];
	// All writes to shared memory must complete before any thread reads.
	__syncthreads();

	// Prefix sum over elements 0..tid, then divide by the element count.
	float total = 0.0f;
	for (int k = 0; k <= tid; k++)
		total += staging[k];
	float mean = total / (tid + 1.0f);

	d_a[tid] = mean;
	// Kept from the original: this final store is redundant and has no
	// observable effect on the result.
	staging[tid] = mean;
}

// Demo driver: fills a 10-element array with 0..9, runs the kernel with one
// block of 10 threads, and prints the running averages.
int main(int argc, char **argv) {
	float h_a[10];
	float *d_a;

	// Initialize host array.
	for (int i = 0; i < 10; i++)
		h_a[i] = i;

	// Allocate device memory and upload the input.
	cudaMalloc((void **)&d_a, sizeof(float) * 10);
	cudaMemcpy((void *)d_a, (void *)h_a, sizeof(float) * 10, cudaMemcpyHostToDevice);

	gpu_shared_memory<<<1, 10>>>(d_a);

	// Download the modified array.
	cudaMemcpy((void *)h_a, (void *)d_a, sizeof(float) * 10, cudaMemcpyDeviceToHost);

	printf("Use of Shared Memory on GPU: \n");
	for (int i = 0; i < 10; i++)
		printf("The running average after %d element is %f \n", i, h_a[i]);
	return 0;
}
3,751
// compute.cu
//
// driver and kernel call

#include <stdio.h>   // for printf
#include <stdlib.h>  // for malloc
#include <string.h>  // for memcpy()
#include <unistd.h>  // for sleep()
#include <math.h>    // for pow()
#include <stdbool.h> // for bool

#define THREADS_PER_BLOCK 512

// Complex matrix-vector product, one thread per output element x:
//   e[x] += sum_i a[i]*c[x + i*n] - b[i]*d[x + i*n]   (real part)
//   f[x] += sum_i a[i]*d[x + i*n] + b[i]*c[x + i*n]   (imaginary part)
// a_d/b_d are the real/imag input vectors, c_d/d_d the real/imag n x n matrix.
// e_d and f_d are accumulated into and must be zeroed by the caller.
// (A trailing __syncthreads() was removed: a barrier at the very end of a
// kernel has no observable effect.)
__global__ void compute_d (double *a_d, double *b_d, double *c_d, double *d_d, double *e_d, double *f_d, int n)
{
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	if (x < n) {
		for (int i = 0; i < n; i++) {
			e_d[x] = e_d[x] + (a_d[i] * c_d[x + i * n]) + (b_d[i] * d_d[x + i * n] * -1);
			f_d[x] = f_d[x] + (a_d[i] * d_d[x + i * n]) + (b_d[i] * c_d[x + i * n]);
		}
	}
}

// Host wrapper: multiplies the complex vector (quregReal, quregImg) by the
// complex matrix (qureg2Real, qureg2Img) on the GPU and writes the product
// back into quregReal/quregImg. arraySize is the vector length n.
extern "C" void matrixMultiplication(double *quregReal, double *quregImg, double *qureg2Real, double *qureg2Img, long long arraySize)
{
	double *a_d, *b_d, *c_d, *d_d, *e_d, *f_d;

	cudaMalloc ((void**) &a_d, sizeof(double) * arraySize);
	cudaMalloc ((void**) &b_d, sizeof(double) * arraySize);
	cudaMalloc ((void**) &c_d, sizeof(double) * arraySize * arraySize);
	cudaMalloc ((void**) &d_d, sizeof(double) * arraySize * arraySize);
	cudaMalloc ((void**) &e_d, sizeof(double) * arraySize);
	cudaMalloc ((void**) &f_d, sizeof(double) * arraySize);

	cudaMemcpy (a_d, quregReal, sizeof(double) * arraySize, cudaMemcpyHostToDevice);
	cudaMemcpy (b_d, quregImg, sizeof(double) * arraySize, cudaMemcpyHostToDevice);
	cudaMemcpy (c_d, qureg2Real, sizeof(double) * arraySize * arraySize, cudaMemcpyHostToDevice);
	cudaMemcpy (d_d, qureg2Img, sizeof(double) * arraySize * arraySize, cudaMemcpyHostToDevice);

	// FIX: the kernel ACCUMULATES into e_d/f_d, but cudaMalloc does not zero
	// memory — the result previously contained whatever garbage was in the
	// allocation. Zero the accumulators before launching.
	cudaMemset (e_d, 0, sizeof(double) * arraySize);
	cudaMemset (f_d, 0, sizeof(double) * arraySize);

	compute_d <<< ceil((float) arraySize/THREADS_PER_BLOCK), THREADS_PER_BLOCK >>> (a_d, b_d, c_d, d_d, e_d, f_d, arraySize);

	cudaMemcpy (quregReal, e_d, sizeof(double) * arraySize, cudaMemcpyDeviceToHost);
	cudaMemcpy (quregImg, f_d, sizeof(double) * arraySize, cudaMemcpyDeviceToHost);

	cudaFree (a_d);
	cudaFree (b_d);
	cudaFree (c_d);
	cudaFree (d_d);
	// FIX: e_d and f_d were leaked on every call.
	cudaFree (e_d);
	cudaFree (f_d);
}
3,752
/*
 * cuda_flow_recog8u_binary.c
 *
 * Created on: Jan 27, 2016
 * Author: sled
 */

//
// Threshold-based Contrasting GPU implementation
//

#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>

#define THREADS_PER_BLOCK 128

// CUDA kernel declaration
__global__ void cuda_flow_binary_kernel(float *flowx, float *flowy, unsigned char *u8res, float thresh, int N);

// C/C++ wrapper: uploads the two optical-flow component arrays, thresholds
// their magnitude on the GPU, and downloads the binary map into u8res.
void gpu_flow_recogu8_binary(float *flowx, float *flowy, unsigned char *u8res, float thresh, int N)
{
	// Device memory
	float *dev_flowx;
	float *dev_flowy;
	uint8_t *dev_u8res;

	// Allocate device memory
	cudaMalloc((void **)&dev_flowx, N * sizeof(float));
	cudaMalloc((void **)&dev_flowy, N * sizeof(float));
	cudaMalloc((void **)&dev_u8res, N * sizeof(uint8_t));

	// Upload data to device memory
	cudaMemcpy(dev_flowx, flowx, N * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_flowy, flowy, N * sizeof(float), cudaMemcpyHostToDevice);

	cuda_flow_binary_kernel<<<((N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dev_flowx, dev_flowy, dev_u8res, thresh, N);

	cudaMemcpy(u8res, dev_u8res, N * sizeof(uint8_t), cudaMemcpyDeviceToHost);

	cudaFree(dev_flowx);
	cudaFree(dev_flowy);
	// FIX: dev_u8res was leaked on every call.
	cudaFree(dev_u8res);
}

// u8res receives a binary map: 1 where the optical-flow magnitude reaches
// `thresh`, 0 elsewhere. Invalid samples (NaN or implausibly huge) map to 0.
__global__ void cuda_flow_binary_kernel(float *flowx, float *flowy, unsigned char *u8res, float thresh, int N)
{
	int idx = threadIdx.x + (blockDim.x * blockIdx.x);
	if (idx >= N)
		return;

	float fx = flowx[idx];
	float fy = flowy[idx];

	// FIX: the original test `fabs(...) < 1e9` rejected every NORMAL value
	// and returned without writing u8res, leaving the whole output buffer
	// uninitialised. Reject only NaN / absurd magnitudes, and report them
	// explicitly as "no motion" so every element of u8res is written.
	if (isnan(fx) || isnan(fy) || fabsf(fx) > 1e9f || fabsf(fy) > 1e9f) {
		u8res[idx] = 0;
		return;
	}

	// FIX: use float sqrtf instead of the double-precision sqrt.
	u8res[idx] = (sqrtf(fx * fx + fy * fy) >= thresh) ? 1 : 0;
}
3,753
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <cuda.h>

// Aborts the program when a CUDA runtime call fails.
static void checkCudaCall(cudaError_t result) {
	if (result != cudaSuccess) {
		printf("cuda error \n");
		exit(1);
	}
}

// Element-wise vector add: deviceResult[i] = deviceA[i] + deviceB[i].
// FIX: added the `n` parameter and bounds guard so threads past the end of
// the vectors no longer touch out-of-range memory when n is not an exact
// multiple of the block size.
__global__ void vectorAddKernel(float* deviceA, float* deviceB, float* deviceResult, unsigned n) {
	unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		deviceResult[i] = deviceA[i] + deviceB[i];
}

// Adds two 1024-element vectors on the GPU (demo: inputs are initialised here
// and the sum lands in a local array).
extern "C" void cuda_do_compute() {
	int threadBlockSize = 512;
	int n = 1024;
	float a[1024], b[1024], result[1024];

	// FIX: a and b were previously uninitialised stack garbage; give the
	// computation deterministic inputs.
	for (int i = 0; i < n; i++) {
		a[i] = (float)i;
		b[i] = (float)(2 * i);
	}

	// allocate the vectors on the GPU
	float* deviceA = NULL;
	checkCudaCall(cudaMalloc((void **) &deviceA, n * sizeof(float)));
	if (deviceA == NULL) {
		printf("Error in cudaMalloc! \n");
		return;
	}
	float* deviceB = NULL;
	checkCudaCall(cudaMalloc((void **) &deviceB, n * sizeof(float)));
	if (deviceB == NULL) {
		checkCudaCall(cudaFree(deviceA));
		printf("Error in cudaMalloc! \n");
		return;
	}
	float* deviceResult = NULL;
	checkCudaCall(cudaMalloc((void **) &deviceResult, n * sizeof(float)));
	if (deviceResult == NULL) {
		checkCudaCall(cudaFree(deviceA));
		checkCudaCall(cudaFree(deviceB));
		printf("Error in cudaMalloc! \n");
		return;
	}

	// copy the original vectors to the GPU
	checkCudaCall(cudaMemcpy(deviceA, a, n*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaCall(cudaMemcpy(deviceB, b, n*sizeof(float), cudaMemcpyHostToDevice));

	// execute kernel; FIX: ceil-divide so a non-multiple n still gets enough
	// blocks (the kernel guard handles the overshoot).
	int numBlocks = (n + threadBlockSize - 1) / threadBlockSize;
	vectorAddKernel<<<numBlocks, threadBlockSize>>>(deviceA, deviceB, deviceResult, (unsigned)n);
	cudaDeviceSynchronize();

	// check whether the kernel invocation was successful
	checkCudaCall(cudaGetLastError());

	// copy result back
	checkCudaCall(cudaMemcpy(result, deviceResult, n * sizeof(float), cudaMemcpyDeviceToHost));
	checkCudaCall(cudaMemcpy(b, deviceB, n * sizeof(float), cudaMemcpyDeviceToHost));

	checkCudaCall(cudaFree(deviceA));
	checkCudaCall(cudaFree(deviceB));
	checkCudaCall(cudaFree(deviceResult));
}
3,754
// Device-side function of one variable: f(x) = cos(x), double precision.
__device__ double f(double x) {
	return cos(x);
}
3,755
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

#define N 2

// Kernel in which every thread of the block stores its own threadIdx.x into
// the SAME shared-memory slot A[0].
// NOTE(review): this is a deliberate shared-memory data race — the final
// value of A[0] is nondeterministic, and the array is never read back.
// Looks like a fixture for a race-detection tool; confirm before "fixing".
__global__ void foo() {
	__shared__ int A[8];
	A[0] = threadIdx.x;
}
3,756
#include "includes.h"

// Binarises row (idx-1) of an N-column array stored in x: every strictly
// positive entry becomes 1.0, everything else becomes 0.0. Uses a
// grid-stride loop so any launch configuration covers all N columns.
__global__ void Step(float * x, size_t idx, size_t N)
{
	const size_t row = (idx - 1) * N;   // base offset of the target row
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
	{
		x[row + i] = (x[row + i] > 0) ? 1.0 : 0.0;
	}
}
3,757
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/*****************************************************************************
 * GPU brute-force password search.
 *
 * Candidates have the form LLDD (two uppercase letters, two digits). A
 * single 26x26 thread block enumerates the letter pairs; each thread
 * serially tries all 100 digit pairs and tests each candidate against four
 * hard-coded plaintexts, printing any match.
 *
 * Compile with:
 * nvcc -o cudapassword 2initialpass_cuda.cu
 *
 * Dr Kevan Buckley, University of Wolverhampton, 2018
 ****************************************************************************/

// Compares `attempt` character-by-character against the four stored
// passwords and prints every one that matches in full (including the
// terminating NUL). Always returns 0; callers ignore the return value.
__device__ int is_a_match (char*attempt){
	char plain_password1[] = "RA34";
	char plain_password2[] = "SR56";
	char plain_password3[] = "HV70";
	char plain_password4[] = "TI52";

	// One cursor per stored password, all starting at `attempt`.
	char *a=attempt;
	char *b=attempt;
	char *c=attempt;
	char *d=attempt;
	char *p1=plain_password1;
	char *p2=plain_password2;
	char *p3=plain_password3;
	char *p4=plain_password4;

	while (*a == *p1){
		if (*a == '\0')
		{
			printf("found password: %s\n",plain_password1);
			break;
		}
		a++;
		p1++;
	}
	while (*b == *p2){
		if (*b == '\0')
		{
			printf("found password: %s\n",plain_password2);
			break;
		}
		b++;
		p2++;
	}
	while (*c == *p3){
		if (*c == '\0')
		{
			printf("found password: %s\n",plain_password3);
			break;
		}
		c++;
		p3++;
	}
	while (*d == *p4){
		if (*d == '\0')
		{
			printf("found password: %s\n",plain_password4);
			break;
		}
		d++;
		p4++;
	}
	return 0;
}

// One thread per letter pair: threadIdx.x/y + 65 maps (0..25, 0..25) onto
// ('A'..'Z', 'A'..'Z'); the two digit positions are enumerated serially.
__global__ void kernel (){
	char s,a;
	char password[5];
	password [4] = '\0';

	int i = threadIdx.x+65;
	int j = threadIdx.y+65;

	char firstvalue = i ;
	char secondvalue = j ;

	password[0] = firstvalue ;
	password [1] = secondvalue;

	for (s='0';s<='9'; s++){
		for (a='0';a<='9'; a++){
			password[2]= s;
			password[3]= a;
			is_a_match(password);
		}
	}
}

// Computes finish - start in nanoseconds into *difference; returns 0 when
// the difference is positive (i.e. the interval is valid), nonzero otherwise.
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) {
	long long int ds = finish->tv_sec - start->tv_sec;
	long long int dn = finish->tv_nsec - start->tv_nsec;
	if(dn < 0 ) {
		ds--;
		dn += 1000000000;
	}
	*difference = ds * 1000000000 + dn;
	return !(*difference > 0);
}

// Launches the search with one 26x26 block, waits for the GPU, and reports
// the wall-clock time.
// NOTE(review): cudaThreadSynchronize() is deprecated in favour of
// cudaDeviceSynchronize().
int main() {
	struct timespec start, finish;
	long long int time_elapsed;

	clock_gettime(CLOCK_MONOTONIC, &start);

	dim3 dim (26,26);
	kernel <<<1,dim>>>();
	cudaThreadSynchronize();

	clock_gettime(CLOCK_MONOTONIC, &finish);
	time_difference (&start, &finish, &time_elapsed);
	printf("Time elapsed was %lldns or %0.9fs\n", time_elapsed, (time_elapsed/1.0e9));
	return 0;
}
3,758
#include <iostream>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <time.h>

// Global constants shared by GPU and CPU code: filter side length and stride.
#define l_kernel 3
#define stride 3

/******************************
 *   CPU matrix processing    *
 ******************************/

/*
 * Max of two floats.
 */
float MaxCPU(float A, float B){
	float result = A > B ? A : B;
	return result;
}

/*
 * File read: parses "M N" on the first line, then M*N floats for each of the
 * R, G and B planes (tipo == 0 is the only supported layout).
 */
void Read(float** R, float** G, float** B, int *M, int *N, const char *filename, int tipo) {
	FILE *fp;
	fp = fopen(filename, "r");
	fscanf(fp, "%d %d\n", M, N);
	int imsize = (*M) * (*N);
	float* R1 = new float[imsize];
	float* G1 = new float[imsize];
	float* B1 = new float[imsize];
	if (tipo == 0){
		// Plain sequential read
		for(int i = 0; i < imsize; i++)
			fscanf(fp, "%f ", &(R1[i]));
		for(int i = 0; i < imsize; i++)
			fscanf(fp, "%f ", &(G1[i]));
		for(int i = 0; i < imsize; i++)
			fscanf(fp, "%f ", &(B1[i]));
	}
	fclose(fp);
	*R = R1;
	*G = G1;
	*B = B1;
}

/*
 * File write: mirrors Read's format ("M N" header, then the three planes).
 */
void Write(float* R, float* G, float* B, int M, int N, const char *filename) {
	FILE *fp;
	fp = fopen(filename, "w");
	fprintf(fp, "%d %d\n", M, N);
	for(int i = 0; i < M*N-1; i++)
		fprintf(fp, "%f ", R[i]);
	fprintf(fp, "%f\n", R[M*N-1]);
	for(int i = 0; i < M*N-1; i++)
		fprintf(fp, "%f ", G[i]);
	fprintf(fp, "%f\n", G[M*N-1]);
	for(int i = 0; i < M*N-1; i++)
		fprintf(fp, "%f ", B[i]);
	fprintf(fp, "%f\n", B[M*N-1]);
	fclose(fp);
}

/*
 * Prints a flat array as an N x M matrix.
 */
void ShowMatrix(float *matrix, int N, int M) {
	for(int i = 0; i < N; i++){
		for(int j = 0; j < M; j++)
			printf("%.1f ", matrix[j + i*M]);
		printf("\n");
	}
	printf("\n");
}

/*
 * CPU "matrix product" sub_A * kernel = C.
 * id: index of the first element of the submatrix; N: width of the R,G,B matrix.
 */
float Product_Matrix(float *A, float *B, int N, int id){
	int col, row, idx_kernel;
	float count;
	col = id%N;
	row = id/N;
	count = 0.0;
	// Walk the l_kernel x l_kernel window.
	idx_kernel = 0;
	for(int i=row; i < row+l_kernel; i++){
		for(int j=col; j< col+l_kernel; j++){
			int id_casilla = j + i*N;
			// printf("%.1f x %.1f\n", A[id_casilla], B[idx_kernel]);
			count += A[id_casilla] * B[idx_kernel];
			idx_kernel += 1;
		}
	}
	return count;
}

// 3x3 convolution with stride 3: one thread per pixel of the Nres-wide output.
// NOTE(review): main() passes the host stack array `kernel` as the third
// argument — dereferencing a host pointer on the device is invalid; the
// filter must be copied to device (or __constant__) memory first.
// NOTE(review): the `tid < 800*800` guard hard-codes one image size instead
// of using the real output dimensions, and the per-pixel printf will flood
// stdout on any real image.
__global__ void convolucion(float *f, float *f_out ,float* kernel, int N, int Nres){
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	int x,y;
	if(tid < 800*800){ // 1 thread per output pixel
		x = 1 + (tid%Nres)*stride; // coordinates of the centre of each sub-matrix
		y = 1 + (tid/Nres)*stride;
		float suma = 0;
		int indice_sub_matriz, indice_kernel;
		for (int i = -1; i<=1 ; i++){
			for (int j = -1; j <= 1; j++){
				indice_sub_matriz = (x+i) + (y+j)*N;
				indice_kernel = (1+i) + (1+j)*3;
				suma += f[indice_sub_matriz] * kernel[indice_kernel];
			}
		}
		printf("%f\n", suma);
		f_out[tid] = suma;
	}
}

// Scans a 2x2 sub-matrix and returns the maximum value.
// NOTE(review): `max` is declared int, so the float candidates are truncated
// on assignment and the function effectively returns floor-of-max as a
// float; windows containing only negative values return 0.
__device__ float max_pool(float *f, int N, int x, int y){
	float valores[4] = {
		f[(x+0) + (y+0)*N], f[(x+1) + (y+0)*N],
		f[(x+0) + (y+1)*N], f[(x+1) + (y+1)*N]
	};
	int max = 0;
	for (int i = 0; i< 4; i++){
		if (valores[i] > max){
			max = valores[i];
		}
	}
	return max;
}

// 2x2 max-pooling, one thread per output pixel.
// NOTE(review): hard-coded `tid < 400*400` guard (same concern as
// convolucion); this kernel is never launched by main().
__global__ void pooling(float *f, float *f_out, int N){
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	int x,y;
	if(tid < 400*400){ // 1 thread per output pixel
		x = 1 + (tid%N)*stride;
		y = 1 + (tid/N)*stride;
		f_out[tid] = max_pool(f, N , x, y);
	}
}

/*
 * Main program
 */
int main(int argc, char **argv){
	/*
	 * Initialisation
	 */
	int M, N;
	float kernel[l_kernel*l_kernel] = {-1, 1, 0, -1, 1, 0 ,-1, 1, 0}; // filter to apply
	float *Rhost, *Ghost, *Bhost;
	float *Rhostout, *Ghostout, *Bhostout;
	float *R, *G, *B;
	float *Rout, *Gout, *Bout;
	int gs, bs = 256;
	float dt;
	cudaEvent_t ct1, ct2;

	// File read
	Read(&Rhost, &Ghost, &Bhost, &M, &N, "img_test.txt", 0);
	int Mres = M/l_kernel;
	int Nres = N/l_kernel;
	Rhostout = new float[Mres*Nres];
	Ghostout = new float[Mres*Nres];
	Bhostout = new float[Mres*Nres];

	/*
	 * GPU part
	 */
	gs = (int)ceil((float) Mres*Nres / bs);
	cudaMalloc((void**)&R, M * N * sizeof(float));
	cudaMalloc((void**)&G, M * N * sizeof(float));
	cudaMalloc((void**)&B, M * N * sizeof(float));
	cudaMemcpy(R, Rhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(G, Ghost, M * N * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(B, Bhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
	cudaMalloc((void**)&Rout, Mres * Nres * sizeof(float));
	cudaMalloc((void**)&Gout, Mres * Nres * sizeof(float));
	cudaMalloc((void**)&Bout, Mres * Nres * sizeof(float));
	cudaEventCreate(&ct1);
	cudaEventCreate(&ct2);
	cudaEventRecord(ct1);
	// NOTE(review): `kernel` here is a HOST stack array — see the note on
	// convolucion; these launches hand the device an invalid pointer.
	convolucion<<<gs, bs>>>(R, Rout, kernel, N, Nres);
	convolucion<<<gs, bs>>>(G, Gout, kernel, N, Nres);
	convolucion<<<gs, bs>>>(B, Bout, kernel, N, Nres);
	cudaEventRecord(ct2);
	cudaEventSynchronize(ct2);
	cudaEventElapsedTime(&dt, ct1, ct2);
	// NOTE(review): these three copies are wrong twice over — the outputs are
	// Mres*Nres floats (not M*N, so this over-reads the device allocations)
	// and the direction must be cudaMemcpyDeviceToHost to land in
	// Rhostout/Ghostout/Bhostout.
	cudaMemcpy(Rhostout, Rout, M * N * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(Ghostout, Gout, M * N * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(Bhostout, Bout, M * N * sizeof(float), cudaMemcpyHostToDevice);
	std::cout << "Tiempo: " << dt << "[ms]" << std::endl;
	Write(Rhostout, Ghostout, Bhostout, Mres, Nres, "resultado.txt");

	/*
	 * Global memory
	 */

	/*
	 * Shared memory
	 */

	cudaFree(R);
	cudaFree(G);
	cudaFree(B);
	cudaFree(Rout);
	cudaFree(Gout);
	cudaFree(Bout);
	delete[] Rhost;
	delete[] Ghost;
	delete[] Bhost;
	delete[] Rhostout;
	delete[] Ghostout;
	delete[] Bhostout;
	return 0;
}
3,759
#include "includes.h"

// Initialises per-chunk mesh geometry for an x-by-y cell grid: cell centres
// and spacings from the vertex coordinate arrays, plus uniform cell volumes
// and face areas. One flat thread index covers every array; each guard below
// bounds the write into its own array.
__global__ void set_chunk_data( int x, int y, double dx, double dy, double* cell_x, double* cell_y, double* cell_dx, double* cell_dy, double* vertex_x, double* vertex_y, double* volume, double* x_area, double* y_area)
{
	const int tid = blockIdx.x*blockDim.x + threadIdx.x;

	// Cell centre = midpoint of its two bounding vertices; spacing is uniform.
	if(tid < x)
	{
		cell_x[tid] = 0.5*(vertex_x[tid] + vertex_x[tid+1]);
		cell_dx[tid] = dx;
	}
	if(tid < y)
	{
		cell_y[tid] = 0.5*(vertex_y[tid] + vertex_y[tid+1]);
		cell_dy[tid] = dy;
	}

	// Uniform cell volume over the x*y cells.
	if(tid < x*y)
	{
		volume[tid] = dx*dy;
	}
	// Face areas: (x+1)*y vertical faces and x*(y+1) horizontal faces.
	if(tid < (x+1)*y)
	{
		x_area[tid] = dy;
	}
	if(tid < x*(y+1))
	{
		y_area[tid] = dx;
	}
}
3,760
#include <iostream>
#include <cmath>

// Grid-stride element-wise addition: y[i] = x[i] + y[i] for i in [0, n).
__global__ void add(int n, float* x, float* y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride) {
        y[i] = x[i] + y[i];
    }
}

// Allocate two 2^28-element vectors, add them on the GPU and report the
// kernel time measured with CUDA events.
int main()
{
    int N = 1 << 28;
    size_t size = N * sizeof(float);
    float *x = (float*)malloc(size);
    float *y = (float*)malloc(size);

    float *d_x, *d_y;
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);

    for (int i = 0; i < N; ++i) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);

    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize; // ceil-div so every element is covered

    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    add<<<numBlocks, blockSize>>>(N, d_x, d_y);
    cudaEventRecord(stop);
    // FIX: cudaEventElapsedTime was called before the stop event had
    // completed; without this sync it returns cudaErrorNotReady and the
    // reported time stays 0.
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);

    std::cout << milliseconds << " elapsed" << std::endl;

    cudaEventDestroy(start); // FIX: events were leaked
    cudaEventDestroy(stop);
    cudaFree(d_x);
    cudaFree(d_y);
    free(x);
    free(y);
    return 0;
}
3,761
#include<bits/stdc++.h>
using namespace std;
#define pi (2.0*acos(0.0))
#define eps 1e-6
#define ll long long
#define inf (1<<29)
#define vi vector<int>
#define vll vector<ll>
#define sc(x) scanf("%d",&x)
#define scl(x) scanf("%lld",&x)
#define all(v) v.begin() , v.end()
#define me(a,val) memset( a , val ,sizeof(a) )
#define pb(x) push_back(x)
#define pii pair<int,int>
#define mp(a,b) make_pair(a,b)
#define Q(x) (x) * (x)
#define L(x) ((x<<1) + 1)
#define R(x) ((x<<1) + 2)
#define M(x,y) ((x+y)>>1)
#define fi first
#define se second
#define MOD 1000000007
#define ios ios::sync_with_stdio(0)

// Matrix stored on the device: flat row-major data plus its dimensions.
typedef struct CudaMatrixStruct {
    double *data;
    int height, width;
} CudaMatrix;

// Debug kernel: dump every element and the dimensions of a device-resident
// CudaMatrix (run with a single thread).
__global__ void CudaPrintMatrix(CudaMatrix *A){
    printf("Elementos de CudaMatrix:\n");
    for(int i = 0; i < A->height * A->width ; i++)
        printf("%d: %.3lf\n", i, A->data[i]);
    printf("CudaMatrix->height: %d CudaMatrix->width: %d\n", A->height, A->width);
}

// Build a hei x wid CudaMatrix entirely on the device, filled with 1..hei*wid,
// and leave its device address in AA. A print kernel verifies the contents.
void CudaCreateMatrix(CudaMatrix *&AA,int hei,int wid){
    // Host staging buffer with the initial values.
    double *d = (double *)malloc( hei * wid * sizeof(double) );
    for(int i = 0 ; i < hei * wid ; i++)
        d[i] = i + 1.0;

    // Device copy of the element array.
    double *data;
    cudaMalloc( (void **) &data, sizeof(double) * hei * wid );
    cudaMemcpy( data , d , sizeof(double) * hei * wid , cudaMemcpyHostToDevice );

    // Host-side struct used only to initialise the device struct.
    CudaMatrix *A = new CudaMatrix();
    A->width = wid;
    A->height = hei;
    cudaMalloc((void **)&AA, sizeof(CudaMatrix));
    cudaMemcpy(AA, A, sizeof(CudaMatrix), cudaMemcpyHostToDevice);
    // Patch the device struct's data pointer. &(AA->data) is pointer
    // arithmetic on the device address (no host dereference happens).
    cudaMemcpy(&(AA->data), &data, sizeof(double *), cudaMemcpyHostToDevice);

    CudaPrintMatrix<<<1, 1>>>(AA);
    // FIX: without this sync the caller could cudaFree(AA) while the print
    // kernel is still running.
    cudaDeviceSynchronize();

    // FIX: the staging buffer and the temporary host struct were leaked.
    free(d);
    delete A;
    // NOTE(review): the device `data` array is intentionally kept alive (it is
    // owned by *AA); main() frees only the struct, so the array still leaks at
    // process exit — confirm whether a destroy helper should free it.
}

int main()
{
    CudaMatrix *devGr;
    int side = 1;
    CudaCreateMatrix( devGr , 2 * side + 1 , 2 * side + 1 );
    cudaFree(devGr);
}
3,762
#include "includes.h"

// For each (radial i, azimuthal j) cell, store the surface-weighted density
// if the cell lies inside the planet's Hill sphere, and zero otherwise.
__global__ void CircumPlanetaryMassKernel (double *Dens, double *Surf, double *CellAbscissa, double *CellOrdinate, double xpl, double ypl, int nrad, int nsec, double HillRadius, double *mdcp0) /* LISTA */
{
  int j = threadIdx.x + blockDim.x*blockIdx.x;   // azimuthal index
  int i = threadIdx.y + blockDim.y*blockIdx.y;   // radial index

  if (i < nrad && j < nsec) {
    const int cell = i*nsec + j;
    // Distance from the cell centre to the planet.
    const double dx = CellAbscissa[cell] - xpl;
    const double dy = CellOrdinate[cell] - ypl;
    const double dist = sqrt(dx*dx + dy*dy);
    mdcp0[cell] = (dist < HillRadius) ? Surf[i] * Dens[cell] : 0.0;
  }
}
3,763
#include <iostream>

// Column-frequency kernel: `input` is a height x width row-major 0/1 matrix
// (rows = transactions, columns = items). For each column, sum its entries.
extern "C" __global__ void count_frequency(int * input, int * output, unsigned width, unsigned height)
{
    int baseX = blockIdx.x * blockDim.x + threadIdx.x;
    int totalThreads = blockDim.x * gridDim.x;
    // Stride over columns; each thread walks one column with step `width`.
    for(int elementIndex = baseX; elementIndex < width; elementIndex += totalThreads)
    {
        int i = elementIndex;
        int sum = 0;
        while(i < width * height)
        {
            sum += input[i];
            i += width;
        }
        output[elementIndex] = sum;
    }
}

// For each candidate itemset (row of `inputSets`, `setWidth` item ids), count
// how many transactions contain every item of the set; counting stops early
// once the count exceeds `minSup`.
// NOTE(review): here `input` is indexed item-major (input[item * width + tid],
// tid being the transaction) — confirm this transposed layout against the
// host-side producer.
extern "C" __global__ void count_sets_frequencies(int * input, int * inputSets, int * output, unsigned width, unsigned height, unsigned setWidth, unsigned sets, unsigned minSup)
{
    int baseX = blockIdx.x * blockDim.x + threadIdx.x;
    int totalThreads = blockDim.x * gridDim.x;
    // One candidate set per thread, strided across the grid.
    for(int cid = baseX; cid < sets; cid += totalThreads)
    {
        int occuredSum = 0;
        int startPoint = cid * setWidth;
        for(int tid = 0; tid < width; tid++)
        {
            bool yes = true;
            int i = 0;
            while(i < setWidth)
            {
                if (input[inputSets[i + startPoint] * width + tid] != 1)
                {
                    // One missing item disqualifies the transaction; force exit.
                    yes = false;
                    i = setWidth;
                }
                else
                {
                    i++;
                }
            }
            // we found in the transaction all elements from the checked set
            if(yes)
            {
                occuredSum++;
            }
            if(occuredSum > minSup) break;
        }
        output[cid] = occuredSum;
    }
}

/* extern "C" __global__ void count_frequency_matrix(int * input, int * inputSets, int * output, unsigned width, unsigned height, unsigned setWidth, unsigned sets) { int baseX = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = blockDim.x * gridDim.x; for(int c = 0; c < sets; c++) { int occuredSum = 0; for(int tid = baseX; tid < height; tid += totalThreads) { bool yes = true; for(int i = 0; i < setWidth; i++) { if (input[tid * width + inputSets[i + c * setWidth]] != 1) { yes = false; break; } } // we found in the transaction all elements from the checked set
if(yes) { occuredSum++; } } output[baseX + c * totalThreads] = occuredSum; } } extern "C" __global__ void count_frequency_matrix2(int * input, int * inputSets, int * output, unsigned width, unsigned height, unsigned setWidth, unsigned sets) { int baseX = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = blockDim.x * gridDim.x; for(int tid = baseX; tid < height; tid += totalThreads) { for(int c = 0; c < sets; c++) { int occuredSum = output[baseX + c * totalThreads]; bool yes = true; for(int i = 0; i < setWidth; i++) { if (input[tid * width + inputSets[i + c * setWidth]] != 1) { yes = false; break; } } // we found in the transaction all elements from the checked set
if(yes) { occuredSum++; output[baseX + c * totalThreads] = occuredSum; } } } }*/

// count sum for each column (which is support for candidate!)
// Row-sum kernel: for each of the `height` rows, sum its `width` entries.
extern "C" __global__ void count_frequency_table(int * input, int * output, unsigned width, unsigned height)
{
    int baseX = blockIdx.x * blockDim.x + threadIdx.x;
    int totalThreads = blockDim.x * gridDim.x;
    for(int id = baseX; id < height; id += totalThreads)
    {
        int sum = 0;
        int innerId = 0;
        while(innerId < width)
        {
            sum += input[innerId + id * width];
            innerId += 1;
        }
        output[id] = sum;
    }
}
3,764
#include <stdio.h>
#include <string.h> /* FIX: memset was used without declaring it */

// Enumerate all CUDA devices, print their key properties, then ask the
// runtime to choose a device matching compute capability >= 1.3.
int main()
{
    cudaDeviceProp prop;
    int count;
    cudaGetDeviceCount(&count);
    printf("Number of CUDA-enabled GPU devices: %d\n", count);
    for(int i=0; i<count; i++) {
        cudaGetDeviceProperties(&prop, i);
        printf("Device %d: \"%s\"\n", i, prop.name);
        printf("   Compute Capability:                    %d.%d\n", prop.major, prop.minor);
        printf("   Number of multi processor:             %d\n", prop.multiProcessorCount);
        // NOTE(review): 8 scalar processors per MP is only correct for
        // SM 1.x hardware; newer architectures have different counts.
        printf("   Number of scalar processor:            %d\n", prop.multiProcessorCount*8);
        // FIX: clockRate is reported in kHz, so MHz = clockRate/1000 (was /1024).
        printf("   Clock rate:                            %d MHz\n", prop.clockRate/1000);
        printf("   Total amount of global memory:         %1.2f GBytes\n", (float)(prop.totalGlobalMem)/(1024*1024*1024));
        // FIX: this line printed regsPerBlock while labelling it shared memory.
        printf("   Total amount of shared memory per MP:  %d KBytes\n", (int)(prop.sharedMemPerBlock/1024));
        printf("   Threads in a warp:                     %d\n", prop.warpSize);
        printf("   Max threads per block:                 %d\n", prop.maxThreadsPerBlock);
        printf("   Max thread dimensions:                 (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("   Max grid dimensions:                   (%d, %d, %d)\n\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    }

    // Select a device satisfying the given minimum compute capability.
    int dev;
    memset(&prop, 0, sizeof(cudaDeviceProp));
    // FIX: `major` was assigned twice (1 then 3); the second assignment was
    // clearly meant to set `minor`, i.e. compute capability 1.3.
    prop.major = 1;
    prop.minor = 3;
    cudaChooseDevice(&dev, &prop);
    printf("ID of selected CUDA device: %d\n", dev);
    cudaSetDevice(dev);
    return 0;
}
3,765
#include <stdio.h>
#include <stdlib.h> /* FIX: atoi/malloc/rand/exit were used without this header */
#include <time.h>

// One Jacobi sweep on the CPU: xNext[i] = (B[i] - sum_{j!=i} A[i][j]*xNow[j]) / A[i][i].
void onCPU(float* A, float* B, float* xNow, float* xNext, int Ni)
{
    int i, j;
    float sum;
    for (i=0; i<Ni; i++) {
        sum = 0.0;
        for (j=0; j<Ni; j++) {
            if (i != j) {
                sum += A[i*Ni + j] * xNow[j];
            }
        }
        xNext[i] = (B[i] - sum) / A[i*Ni + i];
    }
}

// One Jacobi sweep on the GPU: one thread per row of A.
__global__ void onGPU(float* A, float* B, float* xNow, float* xNext, int Ni)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < Ni) {
        float sum = 0.0;
        int idx_Ai = idx*Ni; // start of this thread's row
        for (int j=0; j<Ni; j++) {
            if (idx != j) {
                sum += A[idx_Ai + j] * xNow[j];
            }
        }
        xNext[idx] = (B[idx] - sum) / A[idx_Ai + idx];
    }
}

// Run `iter` Jacobi iterations on a random Ni x Ni system on both CPU and
// GPU (ping-ponging xNow/xNext) and report the wall-clock time of each.
int main( int argc, char *argv[] )
{
    clock_t start_h, end_h, start_d, end_d; // FIX: were time_t; clock() returns clock_t
    float timeCPU, timeGPU;
    float *xNow, *xNext, *A, *B, *xCPU, *xGPU;
    float *xNowDevice, *xNextDevice, *deviceA, *deviceB;
    int N, Ni, iter, tileSize, i, k;
    Ni=2048, iter=10;
    N = Ni * Ni;

    if( argc == 2 ) {
        tileSize = atoi(argv[1]); // threads per block
    } else if( argc > 2 ) {
        printf("Too many arguments supplied.\n");
        exit(0);
    } else {
        printf("Usage: ./jacobi argument(int).\n");
        exit(0);
    }

    printf("Jacobi method:\n\n");
    printf("N = %d, Ni = %d, ", N, Ni);
    printf("thread = %d\n", tileSize);

    A = (float *) malloc(N*sizeof(float));
    B = (float *) malloc(Ni*sizeof(float));
    xNext = (float *) malloc(Ni*sizeof(float));
    xNow = (float *) malloc(Ni*sizeof(float));
    xCPU = (float *) malloc(Ni*sizeof(float));
    xGPU = (float *) malloc(Ni*sizeof(float));

    for (i=0; i<Ni; i++) { xNow[i] = 0; xNext[i] = 0; }
    for (i = 0; i < N; i ++){ A[i] = rand()/(float)RAND_MAX; }
    for (i = 0; i < Ni; i++){ B[i] = rand()/(float)RAND_MAX; }
    /* A[0] = 4; A[1] = 0.24; A[2] = -0.08; A[3] = 0.09; A[4] = 3; A[5] = -0.15;
       A[6] = 0.04; A[7] = -0.08; A[8] = 4; B[0] = 8; B[1] = 9; B[2] = 20; */

    //============================================= CPU =============================================//
    start_h = clock();
    for (k=0; k<iter; k++) {
        // Ping-pong the two solution buffers between iterations.
        if (k%2) { onCPU( A, B, xNow, xNext, Ni); }
        else     { onCPU( A, B, xNext, xNow, Ni); }
    }
    end_h = clock();
    // The buffer holding the final iterate depends on the parity of iter.
    if (iter%2 != 0) { for (i=0; i<Ni; i++) { xCPU[i] = xNext[i]; } }
    else             { for (i=0; i<Ni; i++) { xCPU[i] = xNow[i]; } }

    // Reset the iterates before the GPU run.
    for (i=0; i<Ni; i++) { xNow[i] = 0; xNext[i] = 0; }

    cudaMalloc((void **) &deviceA, N*sizeof(float));
    cudaMalloc((void **) &deviceB, Ni*sizeof(float));
    cudaMalloc((void **) &xNowDevice, Ni*sizeof(float));
    cudaMalloc((void **) &xNextDevice, Ni*sizeof(float));
    cudaMemcpy(deviceA, A, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, B, sizeof(float)*Ni, cudaMemcpyHostToDevice);
    cudaMemcpy(xNowDevice, xNow, sizeof(float)*Ni, cudaMemcpyHostToDevice);
    cudaMemcpy(xNextDevice, xNext, sizeof(float)*Ni, cudaMemcpyHostToDevice);

    int nTiles = Ni/tileSize + (Ni%tileSize == 0?0:1); // ceil(Ni/tileSize) blocks
    int gridHeight = Ni/tileSize + (Ni%tileSize == 0?0:1);
    int gridWidth = Ni/tileSize + (Ni%tileSize == 0?0:1);
    printf("w=%d, h=%d\n",gridWidth,gridHeight);
    dim3 dGrid(gridHeight, gridWidth), dBlock(tileSize, tileSize); // NOTE: unused; kernel is launched 1-D below

    //============================================= GPU =============================================//
    start_d = clock();
    for (k=0; k<iter; k++) {
        if (k%2) { onGPU <<< nTiles, tileSize >>> (deviceA, deviceB, xNowDevice, xNextDevice, Ni); }
        else     { onGPU <<< nTiles, tileSize >>> (deviceA, deviceB, xNextDevice, xNowDevice, Ni); }
    }
    // FIX: kernel launches are asynchronous; without this sync end_d only
    // measured launch overhead, not the actual GPU execution time.
    cudaDeviceSynchronize();
    end_d = clock();

    if (iter%2 != 0 ){ cudaMemcpy(xGPU, xNextDevice, sizeof(float)*Ni, cudaMemcpyDeviceToHost); }
    else             { cudaMemcpy(xGPU, xNowDevice, sizeof(float)*Ni, cudaMemcpyDeviceToHost); }

    free(xNext);
    free(A);
    free(xNow);
    free(B);
    free(xCPU); // FIX: were leaked
    free(xGPU);
    cudaFree(xNextDevice);
    cudaFree(deviceA);
    cudaFree(xNowDevice);
    cudaFree(deviceB);

    timeCPU = ((float)end_h - (float)start_h) / CLOCKS_PER_SEC;
    timeGPU = ((float)end_d - (float)start_d) / CLOCKS_PER_SEC;
    printf("\nTiming:\nCPU: %f\nGPU: %f\n\n", timeCPU, timeGPU);
    return 0;
}
3,766
#include<stdio.h>

// Kernel: each thread reports its block/thread coordinates and the launch shape.
__global__ void hello_from_gpu()
{
    const int nBlocks  = gridDim.x;
    const int nThreads = blockDim.x;
    printf("Hello World from block %d/%d and thread %d/%d!\n",
           blockIdx.x, nBlocks, threadIdx.x, nThreads);
}

int main(void)
{
    // 2 blocks x 3 threads = 6 greetings.
    hello_from_gpu<<<2, 3>>>();
    // Context teardown; completes outstanding work and flushes device printf.
    cudaDeviceReset();
    return 0;
}
3,767
// Copyright (c) 2020 Saurabh Yadav
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT

/* ---------------------------------------------------
   My Hello world for CUDA programming
   --------------------------------------------------- */

#include <stdio.h>
#include <unistd.h>
#include <cuda_runtime.h>

/* ------------------------------------
   Kernel (GPU function): one greeting per launched thread.
   ------------------------------------ */
__global__ void hello(void)
{
    printf("Hello From the GPU !\n");
}

int main()
{
    /* Launch a single GPU thread, greet from the host, then wait so the
       device-side printf is flushed before the program exits. */
    hello<<<1, 1>>>();
    printf("Hello From the CPU ! \n");
    cudaDeviceSynchronize();
    return 0;
}
3,768
#include "includes.h"

/*
 * Perform a reduction from data of length 'size' to result, where length of
 * result will be 'number of blocks'. Each block folds 2*blockDim.x inputs into
 * one partial sum. Launch with blockDim.x * sizeof(float) dynamic shared
 * memory; blockDim.x must be a power of two for the tree reduction.
 *
 * FIX: the original had `tid` declared twice (a redeclaration error) and a
 * long run of stray bare `extern "C"` specifiers; both removed.
 */
extern "C" __global__ void reducePartial(int size, void *data, void *result)
{
    float *fdata = (float*) data;
    float *sum = (float*) result;
    extern __shared__ float sdata[];

    // First level of reduction: each thread reads up to two elements from
    // global memory and writes their sum to shared memory.
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    sdata[tid] = (i < size ? fdata[i] : 0) + (i+blockDim.x < size ? fdata[i+blockDim.x] : 0);
    __syncthreads();

    // Tree reduction in shared memory; barrier outside the divergent branch.
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }

    // Write this block's partial sum to global memory.
    if (tid == 0) sum[blockIdx.x] = sdata[0];
}
3,769
#include "includes.h"
#define N 10000000
#define MAX_ERR 1e-6

// Element-wise vector addition: out[i] = a[i] + b[i] for i in [0, n).
// FIX: the stride loop ignored blockIdx/gridDim, so with a multi-block launch
// every block redundantly processed the entire array (same values written by
// all blocks). A proper grid-stride loop partitions the work; results are
// unchanged for any launch configuration, including a single block.
__global__ void vector_add(float* out, float* a, float* b, int n){
    int index  = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for(int i = index; i < n; i += stride){
        out[i] = a[i] + b[i];
    }
}
3,770
#include<stdio.h>

// Print the key hardware properties of the currently selected CUDA device.
int main()
{
    int dev;
    cudaDeviceProp devprop;
    cudaGetDevice(&dev);
    cudaGetDeviceProperties(&devprop, dev);
    // FIX: the format specifiers were "%1f" (field width 1, full default
    // precision) where one decimal place ("%.1f") was clearly intended.
    // The single huge printf is split per line for readability; the combined
    // output text is otherwise unchanged.
    printf("name = %s\n", devprop.name);
    printf("total global mem = %.1fM\n", devprop.totalGlobalMem/(1024*1024.0));
    printf("shared mem per block = %.1fK\n", devprop.sharedMemPerBlock/1024.0);
    printf("regs per block = %d\n", devprop.regsPerBlock);
    printf("warp size = %d\n", devprop.warpSize);
    printf("clock rate = %.1fGHz\n", devprop.clockRate/(1000000.0)); // clockRate is in kHz
    printf("max threads per block= %d\n", devprop.maxThreadsPerBlock);
    printf("total const mem = %.1fK\n", devprop.totalConstMem/1024.0);
    printf("multiprocessor count = %d\n", devprop.multiProcessorCount);
    printf("max threads per multiprocessor = %d\n", devprop.maxThreadsPerMultiProcessor);
    printf("l2 cache size = %.1fK\n", devprop.l2CacheSize/1024.0);
}
3,771
//http://stackoverflow.com/questions/22217628/integral-image-or-summed-area-table-of-2d-matrix-using-cuda-c
#include <iostream>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>

#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 16

using namespace std;

// Summed-area-table kernel processed as an anti-diagonal wavefront of tiles:
// the host launches this once per diagonal `start`; each launch computes one
// diagonal band of BLOCK_DIM_X x BLOCK_DIM_Y tiles. Inside a tile, threads
// spin on shared-memory ready flags so that each element is computed only
// after its top and left neighbours within the tile are done.
// NOTE(review): out-of-range threads `return` before reaching the
// __syncthreads() calls inside the spin loop — with partial tiles this is a
// divergent-barrier hazard; it only works here because M and N are multiples
// of the block dimensions. Confirm before reusing with other sizes.
__global__ void sat(int *a, int*b, int rowsTotal,int colsTotal,int start)
{
    // Thread Ids equal to block Ids because the each blocks contains one thread only.
    int col;
    int row;
    // Map the 1-D block index onto the current anti-diagonal of tiles.
    if (start<=rowsTotal/blockDim.y-1)
    {
        row = (colsTotal/blockDim.x-1-blockIdx.x)*blockDim.y+threadIdx.y-blockDim.y*(colsTotal/blockDim.x-1-start);
        col = blockIdx.x*blockDim.x+threadIdx.x;
    }
    else
    {
        row = (colsTotal/blockDim.x-1-blockIdx.x)*blockDim.y+threadIdx.y;
        col = (start-(colsTotal/blockDim.x-1))*blockDim.x+blockIdx.x*blockDim.x+threadIdx.x;
    }
    if (row>=rowsTotal || row <0) return;
    if (col>=colsTotal || col <0) return;

    int idx = threadIdx.y*blockDim.x+threadIdx.x; // id in block
    int didx = row*colsTotal+col; // compute data id

    // Per-element "done" flags for the intra-tile dependency wavefront.
    __shared__ int s[BLOCK_DIM_X*BLOCK_DIM_Y];
    s[idx]=0;
    __syncthreads();
    //printf("run kernel\n");
    // Spin until the bottom-right element of the tile has been computed.
    while (s[BLOCK_DIM_X*BLOCK_DIM_Y-1]==0){
        if (s[idx]==0){
            // Top row of the tile: depends only on the left neighbour.
            if (threadIdx.x>0 && threadIdx.y==0)
            {
                if (s[idx-1])
                {
                    if (row>0 ) b[didx]=b[didx-colsTotal]+a[didx]+b[didx-1]-b[didx-colsTotal-1];
                    else b[didx]=a[didx]+b[didx-1];
                    s[idx]=1;
                }
            }
            // Left column of the tile: depends only on the upper neighbour.
            if (threadIdx.y>0 && threadIdx.x==0)
            {
                if (s[idx-blockDim.x])
                {
                    if (col>0 ) b[didx]=b[didx-colsTotal]+a[didx]+b[didx-1]-b[didx-colsTotal-1];
                    else b[didx]=a[didx]+b[didx-colsTotal];
                    s[idx]=1;
                }
            }
            // Interior: needs both the upper and the left neighbour.
            if (threadIdx.y>0 && threadIdx.x>0)
            {
                if (s[idx-blockDim.x] && s[idx-1])
                {
                    b[didx]=b[didx-colsTotal]+a[didx]+b[didx-1]-b[didx-colsTotal-1];
                    s[idx]=1;
                }
            }
            // Tile corner: its cross-tile neighbours were finished in earlier launches.
            if (threadIdx.x==0 && threadIdx.y==0)
            {
                if (row>0 && col>0) b[didx]=b[didx-colsTotal]+a[didx]+b[didx-1]-b[didx-colsTotal-1];
                if (row==0 && col>0) b[didx]=a[didx]+b[didx-1];
                if (row>0 && col==0) b[didx]=b[didx-colsTotal]+a[didx];
                if (row==0 && col==0) b[didx]=a[didx];
                s[idx]=1;
            }
        }
        __syncthreads();
    }
}

// Reference CPU summed-area table: b[r][c] = sum of a over [0..r]x[0..c].
void cpu_sat(int* a, int* b, int M, int N){
    for(int r=0;r<M;r++)
    {
        for(int c=0; c<N;c++)
        {
            if(r==0)
            {
                if (c>0) b[r*N+c]=b[r*N+c-1]+a[r*N+c];
                else b[r*N+c]=a[r*N+c];
            }
            else{
                if (c>0) b[r*N+c]=b[r*N+c-1]+a[r*N+c]+b[(r-1)*N+c]-b[(r-1)*N+c-1];
                else b[r*N+c]=a[r*N+c]+b[(r-1)*N+c];
            }
        }
    }
}

// Build a random MxN matrix, compute its SAT on CPU and GPU, and report the
// number of mismatching elements.
int main()
{
    //M is number of rows
    //N is number of columns
    //M,N have to be multiples of BLOCK_DIM_X and BLOCK_DIM_Y
    int M=64,N=64;
    int total_e=M*N;
    int widthstep=total_e*sizeof(int);

    int * matrix_a= (int *)malloc(widthstep);
    int * matrix_b= (int *)malloc(widthstep);
    int * cpu_result = (int *)malloc(widthstep);
    // NOTE(review): cpu_result is never freed.

    //cout<<"Enter elements for "<< M<<"x"<<N<<" matrix";
    for(int r=0;r<M;r++)
    {
        for(int c=0; c<N;c++)
        {
            //cout<<"Enter Matrix element [ "<<r<<","<<c<<"]";
            matrix_a[r*N+c]=rand()%100;
            matrix_b[r*N+c]=0;
        }
    }

    cpu_sat(matrix_a,cpu_result,M,N);

    int * d_matrix_a, * d_matrix_b;
    //cout<<"start copy"<<endl;
    /* for(int r=0;r<M;r++) { for(int c=0; c<N;c++) { cout << matrix_a[r*N+c]<<" "; } cout << endl; } cout<<endl; */

    cudaMalloc(&d_matrix_a,widthstep);
    cudaMalloc(&d_matrix_b,widthstep);
    cudaMemcpy(d_matrix_a,matrix_a,widthstep,cudaMemcpyHostToDevice);
    cudaMemcpy(d_matrix_b,matrix_b,widthstep,cudaMemcpyHostToDevice);

    //Creating a grid where the number of blocks are equal to the number of pixels or input matrix elements.
    //Each block contains only one thread.
    dim3 grid(N/BLOCK_DIM_X); // grid is one dimensional!!
    dim3 blockdim(BLOCK_DIM_X,BLOCK_DIM_Y);

    // One launch per anti-diagonal of tiles; later diagonals depend on the
    // results written by earlier ones.
    for (int i=0;i<M/BLOCK_DIM_Y+N/BLOCK_DIM_X-1;i++){
        sat<<<grid,blockdim>>>(d_matrix_a, d_matrix_b,M,N,i);
        //cudaThreadSynchronize();
    }
    cudaThreadSynchronize(); // NOTE(review): deprecated; cudaDeviceSynchronize is the modern call

    cudaMemcpy(matrix_b,d_matrix_b,widthstep,cudaMemcpyDeviceToHost);

    cout<<"Compare with CPU result: "<<endl;
    int count=0;
    for(int r=0;r<M;r++)
    {
        for(int c=0; c<N;c++)
        {
            if(cpu_result[r*N+c]!=matrix_b[r*N+c])
            {
                count+=1;//cout << matrix_b[r*N+c]<<" "<<;
                // if(r==0) cout<<r<<" "<<c<<" cpu:"<<cpu_result[r*N+c]<<" gpu:"<<matrix_b[r*N+c]<<endl;
            }
        }
        //cout << endl;
    }
    cout<<"mismatch: "<<count<<endl;
    //system("pause");
    cudaFree(d_matrix_a);
    cudaFree(d_matrix_b);
    free(matrix_a);
    free(matrix_b);
    return 0;
}
3,772
#include "includes.h"

// Scalar-times-vector: result[i] = a * x[i] for every i in [0, len).
__global__ void sax_kernel(const float a, const float* x, float* result, unsigned int len)
{
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Tail guard: the grid may launch more threads than elements.
    if (gid >= len)
        return;
    result[gid] = a * x[gid];
}
3,773
// Fill an nx*ny*nz volume (x fastest, then y, then z) with a constant value.
// FIX: thread coordinates were computed with hard-coded block dimensions
// (16, 16, 4). Deriving them from blockDim generalizes the kernel to any
// launch shape; behaviour is unchanged for the original 16x16x4 blocks.
__global__ void kernel_initial(float *img, int nx, int ny, int nz, float value){
    int ix = blockDim.x * blockIdx.x + threadIdx.x;
    int iy = blockDim.y * blockIdx.y + threadIdx.y;
    int iz = blockDim.z * blockIdx.z + threadIdx.z;
    // Tail guard: the grid may overshoot the volume in any dimension.
    if (ix >= nx || iy >= ny || iz >= nz)
        return;
    img[ix + iy * nx + iz * nx * ny] = value;
}
3,774
/*The number of threads per block and the number of blocks per grid specified in the <<<...>>> syntax can be of type int or dim3. Two-dimensional blocks or grids can be specified as in the example above. Each block within the grid can be identified by a one-dimensional, two-dimensional, or three-dimensional index accessible within the kernel through the built-in blockIdx variable. The dimension of the thread block is accessible within the kernel through the built-in blockDim variable. Extending the previous MatAdd() example to handle multiple blocks, the code becomes as follows. */

#include <stdio.h>
#define N 1024

// N x N operand and result matrices in device global memory.
__device__ int A[N][N];
__device__ int B[N][N];
__device__ int C[N][N];

// Element-wise matrix addition: one thread per (i, j) element of C.
__global__ void MatAdd()
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    // Tail guard in case the grid overshoots N in either dimension.
    if (i < N && j < N)
        C[i][j] = A[i][j] + B[i][j];
}

int main()
{
    // Kernel invocation: 16x16 threads per block, enough blocks to cover N x N.
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
    MatAdd<<<numBlocks, threadsPerBlock>>>();
    // Wait for the kernel to finish before the process exits.
    cudaDeviceSynchronize();
}

/*A thread block size of 16x16 (256 threads), although arbitrary in this case, is a common choice. The grid is created with enough blocks to have one thread per matrix element as before. For simplicity, this example assumes that the number of threads per grid in each dimension is evenly divisible by the number of threads per block in that dimension, although that need not be the case. Thread blocks are required to execute independently: It must be possible to execute them in any order, in parallel or in series. This independence requirement allows thread blocks to be scheduled in any order across any number of cores as illustrated by Figure 5, enabling programmers to write code that scales with the number of cores. Threads within a block can cooperate by sharing data through some shared memory and by synchronizing their execution to coordinate memory accesses. More precisely, one can specify synchronization points in the kernel by calling the __syncthreads() intrinsic function; __syncthreads() acts as a barrier at which all threads in the block must wait before any is allowed to proceed. Shared Memory gives an example of using shared memory. For efficient cooperation, the shared memory is expected to be a low-latency memory near each processor core (much like an L1 cache) and __syncthreads() is expected to be lightweight. */
3,775
// Author: Ayush Kumar
// Roll No: 170195
// Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p1.cu -o assignment5-p1

#include <cmath>
#include <cstdint>
#include <cuda.h>
#include <iostream>
#include <new>
#include <sys/time.h>

#define THRESHOLD (0.000001)

#define SIZE1 8192
#define SIZE2 8200
#define ITER 100

using std::cerr;
using std::cout;
using std::endl;

// 3-point vertical stencil on column j+1 of a SIZE1 x SIZE1 matrix.
// The i-loop carries a dependence (row i uses the already-updated row i-1),
// so only the j (column) dimension is parallelized: one thread per column.
__global__ void kernel1(double* d_k1) {
  // TODO: Fill in
  // int i = blockIdx.y * blockDim.y + threadIdx.y;
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  if(j < SIZE1-1){
    for (int k = 0; k < ITER; k++) {
      for (int i = 1; i < (SIZE1 - 1); i++) {
        d_k1[i*SIZE1 + j+1] = d_k1[(i-1)*SIZE1 + j+1] + d_k1[i*SIZE1 + j+1] + d_k1[(i+1)*SIZE1 + j+1];
      }
    }
  }
}

// Same stencil on a SIZE2 x SIZE2 matrix, with the sequential i-loop manually
// unrolled by 8 (each unrolled statement feeds the next, preserving the
// loop-carried dependence) plus a remainder loop for the tail rows.
__global__ void kernel2(double* d_k2) {
  // TODO: Fill in
  // int i = blockIdx.y * blockDim.y + threadIdx.y;
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  int unroll = 8;
  if (j < SIZE2-1) {
    for (int k = 0; k < ITER; k++) {
      int i;
      for (i = 1; i+(unroll-1) < (SIZE2 - 1); i += unroll) {
        d_k2[i*SIZE2 + j+1] = d_k2[(i-1)*SIZE2 + j+1] + d_k2[i*SIZE2 + j+1] + d_k2[(i+1)*SIZE2 + j+1];
        d_k2[(i+1)*SIZE2 + j+1] = d_k2[(i)*SIZE2 + j+1] + d_k2[(i+1)*SIZE2 + j+1] + d_k2[(i+2)*SIZE2 + j+1];
        d_k2[(i+2)*SIZE2 + j+1] = d_k2[(i+1)*SIZE2 + j+1] + d_k2[(i+2)*SIZE2 + j+1] + d_k2[(i+3)*SIZE2 + j+1];
        d_k2[(i+3)*SIZE2 + j+1] = d_k2[(i+2)*SIZE2 + j+1] + d_k2[(i+3)*SIZE2 + j+1] + d_k2[(i+4)*SIZE2 + j+1];
        d_k2[(i+4)*SIZE2 + j+1] = d_k2[(i+3)*SIZE2 + j+1] + d_k2[(i+4)*SIZE2 + j+1] + d_k2[(i+5)*SIZE2 + j+1];
        d_k2[(i+5)*SIZE2 + j+1] = d_k2[(i+4)*SIZE2 + j+1] + d_k2[(i+5)*SIZE2 + j+1] + d_k2[(i+6)*SIZE2 + j+1];
        d_k2[(i+6)*SIZE2 + j+1] = d_k2[(i+5)*SIZE2 + j+1] + d_k2[(i+6)*SIZE2 + j+1] + d_k2[(i+7)*SIZE2 + j+1];
        d_k2[(i+7)*SIZE2 + j+1] = d_k2[(i+6)*SIZE2 + j+1] + d_k2[(i+7)*SIZE2 + j+1] + d_k2[(i+8)*SIZE2 + j+1];
      }
      // Remainder rows not covered by the unrolled loop.
      for(int i1=i; i1<(SIZE2-1); i1++) {
        d_k2[i1*SIZE2 + j+1] = d_k2[(i1-1)*SIZE2 + j+1] + d_k2[i1*SIZE2 + j+1] + d_k2[(i1+1)*SIZE2 + j+1];
      }
    }
  }
}

// CPU reference version of the same stencil, used to validate kernel1.
__host__ void serial(double* h_ser) {
  for (int k = 0; k < ITER; k++) {
    for (int i = 1; i < (SIZE1 - 1); i++) {
      for (int j = 0; j < (SIZE1 - 1); j++) {
        h_ser[i*SIZE1 + j+1] = (h_ser[(i-1)*SIZE1 + j+1] + h_ser[i*SIZE1 + j+1] + h_ser[(i+1)*SIZE1 + j+1]);
      }
    }
  }
}

// Compare two size x size matrices element-wise and report how many entries
// differ by more than THRESHOLD (and the largest such difference).
__host__ void check_result(double* w_ref, double* w_opt, uint64_t size) {
  double maxdiff = 0.0, this_diff = 0.0;
  int numdiffs = 0;
  for (uint64_t i = 0; i < size; i++) {
    for (uint64_t j = 0; j < size; j++) {
      this_diff = w_ref[i*size + j] - w_opt[i*size + j];
      if (fabs(this_diff) > THRESHOLD) {
        numdiffs++;
        if (this_diff > maxdiff)
          maxdiff = this_diff;
      }
    }
  }
  if (numdiffs > 0) {
    cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff << endl;
  } else {
    cout << "No differences found between base and test versions\n";
  }
}

// Wall-clock time in seconds.
__host__ double rtclock() { // Seconds
  struct timezone Tzp;
  struct timeval Tp;
  int stat;
  stat = gettimeofday(&Tp, &Tzp);
  if (stat != 0) {
    cout << "Error return from gettimeofday: " << stat << "\n";
  }
  return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}

// Driver: run the serial reference, then time kernel1 (validated against the
// reference) and kernel2. Timings deliberately include the H2D/D2H copies.
int main() {
  double* h_ser = new double[SIZE1*SIZE1];
  double* h_k1 = new double[SIZE1*SIZE1]; //needs to be contiguous for cudaMemcpy to work
  for (int i = 0; i < SIZE1; i++) {
    for (int j = 0; j < SIZE1; j++) {
      h_ser[i*SIZE1 + j] = 1;
      h_k1[i*SIZE1 + j] = 1;
    }
  }

  double* h_k2 = new double[SIZE2*SIZE2];
  for (int i = 0; i < SIZE2; i++) {
    for (int j = 0; j < SIZE2; j++) {
      h_k2[i*SIZE2 + j] = 1;
    }
  }

  double clkbegin = rtclock();
  serial(h_ser);
  double clkend = rtclock();
  double time = clkend - clkbegin; // seconds
  cout << "Serial code on CPU: " << ((2.0 * SIZE1 * SIZE1 * ITER) / time) << " GFLOPS; Time = " << time * 1000 << " msec" << endl;

  cudaError_t status;
  cudaEvent_t start, end;
  float k1_time, k2_time; // milliseconds

  double* d_k1;
  // TODO: Fill in
  cudaMalloc(&d_k1, sizeof(double)*SIZE1*SIZE1);
  // full parallelization
  dim3 threads_in_block1(32);
  dim3 blocks_in_grid1(SIZE1/threads_in_block1.x); // one thread per column
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start, 0);
  /************** CUDA **************/
  cudaMemcpy(d_k1, h_k1, sizeof(double)*SIZE1*SIZE1, cudaMemcpyHostToDevice);
  kernel1<<<blocks_in_grid1, threads_in_block1>>>(d_k1);
  cudaMemcpy(h_k1, d_k1, sizeof(double)*SIZE1*SIZE1, cudaMemcpyDeviceToHost);
  /************** CUDA **************/
  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);
  cudaEventElapsedTime(&k1_time, start, end);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  check_result(h_ser, h_k1, SIZE1);
  cout << "Kernel 1 on GPU: " << ((2.0 * SIZE1 * SIZE1 * ITER) / (k1_time * 1.0e-3)) << " GFLOPS; Time = " << k1_time << " msec" << endl;
  status = cudaGetLastError();
  if (status != cudaSuccess) {
    cerr << cudaGetErrorString(status) << endl;
  }

  double* d_k2;
  // TODO: Fill in
  cudaMalloc(&d_k2, sizeof(double)*SIZE2*SIZE2);
  dim3 threads_in_block2(32);
  // SIZE2 is not a multiple of 32, so round the grid up.
  dim3 blocks_in_grid2((SIZE2+threads_in_block2.x-1)/threads_in_block2.x);
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start, 0);
  /************** CUDA **************/
  cudaMemcpy(d_k2, h_k2, sizeof(double)*SIZE2*SIZE2, cudaMemcpyHostToDevice);
  kernel2<<<blocks_in_grid2, threads_in_block2>>>(d_k2);
  cudaMemcpy(h_k2, d_k2, sizeof(double)*SIZE2*SIZE2, cudaMemcpyDeviceToHost);
  /************** CUDA **************/
  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);
  cudaEventElapsedTime(&k2_time, start, end);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  cout << "Kernel 2 on GPU: " << ((2.0 * SIZE2 * SIZE2 * ITER) / (k2_time * 1.0e-3)) << " GFLOPS; Time = " << k2_time << " msec" << endl;
  status = cudaGetLastError();
  if (status != cudaSuccess) {
    cerr << "CUDA Error: " << cudaGetErrorString(status) << endl;
  }

  cudaFree(d_k1);
  cudaFree(d_k2);
  delete[] h_ser;
  delete[] h_k1;
  delete[] h_k2;

  return EXIT_SUCCESS;
}
3,776
#include <stdio.h>
#include <stdlib.h>

// Query device 0 and print a summary of its hardware properties.
int main(int argc, char *argv[]){
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, 0);
    printf("  Device: \"%s\"\n", deviceProp.name);
    printf("  Compute Capability: %d.%d\n", deviceProp.major, deviceProp.minor);
    printf("  Multiprocessors count: %d\n", deviceProp.multiProcessorCount);
    printf("  Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
    printf("  Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
    printf("  Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
    printf("  Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
    printf("  Warp size: %d\n", deviceProp.warpSize);
    printf("  Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
    printf("  Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
    // totalGlobalMem is in bytes; 1048576 converts to MBytes.
    printf("  Total global mem: %0.f MBytes\n", deviceProp.totalGlobalMem/1048576.0f);
    return 0;
}
3,777
/*
 * Last name: Gupta
 * First name: Vaibhav
 * Net ID: vvg239
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <math.h> /* FIX: sqrt was used without this header */

void seq_gen_primes(int);
__global__ void remove_for_divisor(bool*, unsigned int, int);
__global__ void remove_all(bool*, unsigned int, unsigned int);
void gpu_gen_primes(unsigned int);

int main(int argc, char * argv[])
{
    int N;
    // to measure time taken by a specific part of the code
    double time_taken;
    clock_t start, end;
    if(argc == 2) {
        N = atoi(argv[1]);
    } else {
        printf("Please give a value for N\n");
        // FIX: the original fell through and used N uninitialized.
        return EXIT_FAILURE;
    }
    start = clock();
    gpu_gen_primes(N);
    end = clock();
    time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
    printf("Time taken for %s is %lf\n","GPU", time_taken);
}

/******************** The GPU parallel version **************/
// Sieve of Eratosthenes on the GPU: entries of the flag array that remain
// `false` are prime. Results are written to "<N>.txt".
void gpu_gen_primes(unsigned int N)
{
    //File Stream Initialization
    FILE * fPtr;
    char fileName[15];
    sprintf(fileName, "%d", N);
    strcat(fileName, ".txt");
    fPtr = fopen(fileName, "w");

    double time_taken;
    clock_t start, end;
    start = clock();

    //CUDA Memory Allocation
    int size = (N+1) * sizeof(bool);
    bool * d_primes;
    cudaMalloc(&d_primes, size);
    // FIX: cudaMalloc does not zero memory; the kernels both read and
    // conditionally skip entries based on these flags, so uninitialized
    // garbage produced wrong (nondeterministic) output.
    cudaMemset(d_primes, 0, size);

    //Configuring CUDA Kernels
    unsigned int last_divisor = sqrt(N); // no factor of a composite <= N exceeds sqrt(N)
    int num_threads = 1024;
    int num_blocks = last_divisor/(num_threads) + 1;
    int num_threads_for_two = 1024;
    int num_blocks_for_two = N/(num_threads_for_two*2) + 1;
    (void)num_blocks_for_two; // kept for clarity; per-divisor grids are computed inline below

    //Call kernels in Streams: strike out multiples of the small primes
    //concurrently, then sieve the remaining divisors.
    cudaStream_t stream[4];
    int ds[4] = {2,3,5,7};
    for (int i = 0; i < 4; i++) {
        cudaStreamCreate(&stream[i]);
        remove_for_divisor<<<N/(num_threads_for_two*ds[i]) + 1, num_threads_for_two,0,stream[i]>>>(d_primes, N, ds[i]);
    }
    remove_all<<<num_blocks, num_threads, 0, stream[0]>>>(d_primes, N, last_divisor);
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess) {
        printf("CUDA error %s \n", cudaGetErrorString(error));
    }
    end = clock();
    time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
    printf("Time taken without print statements for %s is %lf\n","GPU", time_taken);

    //Copy CUDA Memory and Print in File
    bool * primes;
    // FIX: was calloc(N, ...) while the copy below transfers N+1 flags and the
    // print loop reads index N — a one-byte heap overflow.
    primes = (bool *)calloc(N + 1, sizeof(bool));
    cudaMemcpy(primes, d_primes, size, cudaMemcpyDeviceToHost);
    cudaFree(d_primes);
    int i;
    for(i = 2; i < N+1; i++) {
        if(!primes[i]) {
            fprintf(fPtr, "%d ", i);
        }
    }

    // FIX: release the file handle, the host buffer and the streams.
    fclose(fPtr);
    free(primes);
    for (int s = 0; s < 4; s++) {
        cudaStreamDestroy(stream[s]);
    }
}

// Mark every multiple of `divisor` (starting at 2*divisor) as composite.
__global__ void remove_for_divisor(bool* n_series, unsigned int N, int divisor)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int e = divisor*(i+2); // first multiple is 2*divisor, so divisor itself survives
    if(e <= N) {
        n_series[e] = true;
    }
}

// For every divisor in [3, max_divisor] still flagged prime, mark its
// multiples starting at divisor^2 (smaller multiples were already handled).
__global__ void remove_all(bool* n_series, unsigned int N, unsigned int max_divisor)
{
    int divisor = blockIdx.x * blockDim.x + threadIdx.x + 3;
    // this might initialize for some divisors (like 9) that are not prime but this
    // still gives better performance, than waiting for 3 to finish and then executing 5,7 and 11.
    if (divisor <= max_divisor && n_series[divisor] == false) {
        // start marking off from (divisor)^2
        for (int j = divisor * divisor; j <= N; j += divisor) {
            n_series[j] = true;
        }
    }
}
3,778
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

/* Escape-time iteration for one point c = (c_re, c_im); returns the
   iteration at which |z| exceeds 2, capped at `count`. */
__device__ int mandel(float c_re, float c_im, int count)
{
    float z_re = c_re, z_im = c_im;
    int i;
    for (i = 0; i < count; ++i) {
        if (z_re * z_re + z_im * z_im > 4.f)
            break;
        float new_re = z_re * z_re - z_im * z_im;
        float new_im = 2.f * z_re * z_im;
        z_re = c_re + new_re;
        z_im = c_im + new_im;
    }
    return i;
}

/* Each thread renders a groupsize x groupsize tile of the image.
   Out-of-range pixels are skipped, so any resX/resY works.
   Pixel coordinates are mapped as x = lowerX + i*stepX (avoids
   accumulating floating-point error across the row). */
__global__ void mandelKernel(int *deviceans, float lowerX, float lowerY,
                             float stepX, float stepY,
                             int resX, int resY, int maxIterations)
{
    const int groupsize = 8;
    int localx = (blockIdx.x * blockDim.x + threadIdx.x) * groupsize;
    int localy = (blockIdx.y * blockDim.y + threadIdx.y) * groupsize;
    double tmpx, tmpy;
    for (int i = localx; i < localx + groupsize; ++i) {
        for (int j = localy; j < localy + groupsize; ++j) {
            if (i >= resX || j >= resY)
                continue;
            tmpx = lowerX + i * stepX;
            tmpy = lowerY + j * stepY;
            deviceans[resX * j + i] = mandel(tmpx, tmpy, maxIterations);
        }
    }
}

// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE(float upperX, float upperY, float lowerX, float lowerY,
            int *img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    dim3 threadperblock(16, 16);
    dim3 numblocks(ceil(resX * 1.0 / (16 * 8)), ceil(resY * 1.0 / (16 * 8)));

    /* BUG FIX: the kernel indexes the buffer as a dense resX-wide array,
       but the old code allocated it with cudaMallocPitch and then copied
       it back with a flat cudaMemcpy, corrupting rows whenever the pitch
       exceeded resX*sizeof(int).  A plain linear allocation matches the
       kernel's indexing. */
    int *deviceans;
    cudaMalloc(&deviceans, (size_t)resX * resY * sizeof(int));

    mandelKernel<<<numblocks, threadperblock>>>(deviceans, lowerX, lowerY,
                                                stepX, stepY, resX, resY,
                                                maxIterations);
    cudaDeviceSynchronize();

    /* copy straight into the caller's buffer; the old pinned staging
       buffer plus element-by-element copy added nothing */
    cudaMemcpy(img, deviceans, (size_t)resX * resY * sizeof(int),
               cudaMemcpyDeviceToHost);

    cudaFree(deviceans);
}
3,779
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <limits.h>   /* INT_MAX (was missing) */
#include <math.h>     /* pow    (was missing) */

#define THREADS 1024
#define BLOCKS 65536
#define NUM_VALS THREADS*BLOCKS
#define ASCENDING 1
#define DESCENDING 0

/* Fill values[0..length) with random positive ints. */
void rand_nums(int *values, unsigned long length)
{
    for (unsigned long i = 0; i < length; ++i) {
        values[i] = rand() % INT_MAX + 1;
    }
}

/* Exchange a[i] and a[j] if they violate direction `dir`
   (ASCENDING sorts up, DESCENDING sorts down). */
void compAndSwap(int *a, int i, int j, int dir)
{
    if (dir == (a[i] > a[j])) {
        int temp = a[j];
        a[j] = a[i];
        a[i] = temp;
    }
}

/* Merge a bitonic sequence of length cnt starting at `low` into order `dir`. */
void bitonicMerge(int *a, int low, int cnt, int dir)
{
    if (cnt > 1) {
        int k = cnt / 2;
        for (int i = low; i < low + k; i++)
            compAndSwap(a, i, i + k, dir);
        bitonicMerge(a, low, k, dir);
        bitonicMerge(a, low + k, k, dir);
    }
}

/* Recursive bitonic sort of n elements (n must be a power of two). */
void bitonicSort(int *values, int low, unsigned long n, int dir)
{
    if (n > 1) {
        int k = n / 2;
        bitonicSort(values, low, k, ASCENDING);
        bitonicSort(values, low + k, k, DESCENDING);
        bitonicMerge(values, low, n, dir);
    }
}

int main(int argc, char *argv[])
{
    int k = 10;
    if (argc == 2)
        k = atoi(argv[1]);

    unsigned long n = pow(2, k);
    /* Guard: the buffers below hold at most NUM_VALS elements
       (was unchecked, so k > 26 overflowed the allocation). */
    if (n > (unsigned long)NUM_VALS) {
        fprintf(stderr, "k too large: n exceeds %d\n", NUM_VALS);
        return 1;
    }

    int *values = (int *)malloc(NUM_VALS * sizeof(int));
    int *origValues = (int *)malloc(NUM_VALS * sizeof(int));
    printf("\nk = %d, n = %ld\n", k, n);

    rand_nums(values, n);
    for (unsigned long i = 0; i < n; i++)
        origValues[i] = values[i];

    clock_t begin = clock();
    bitonicSort(values, 0, n, ASCENDING);
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("\tElapsed time: %f seconds\n", time_spent);

    free(values);
    free(origValues);
}
3,780
#include "includes.h"

/* Element-wise vector sum: d_C = d_A + d_B over TAM elements.
   One thread per element; extra threads bail out early. */
__global__ void vecAdd(float* d_A, float* d_B, float* d_C)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= TAM)
        return;
    d_C[idx] = d_A[idx] + d_B[idx];
}
3,781
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <stdio.h>
#include <iostream>
using namespace std;

/* C = A (N x M) * B (M x N), one thread per output element, inputs staged
   through shared memory.  Launched as a single N x N thread block. */
__global__ void matrixMultiply(int *d_a, size_t pitch_a, int *d_b, size_t pitch_b,
                               int *d_c, size_t pitch_c, const int N, const int M)
{
    __shared__ int input1Temp[4][3];   /* tile of A */
    __shared__ int input2Temp[3][4];   /* tile of B */

    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;

    /* BUG FIX: the loads were guarded with `col < N-1` / `row < N-1`,
       which only worked because N-1 happened to equal M, and the
       __syncthreads() sat inside a divergent branch — undefined behavior.
       Guard by the real tile shapes and have ALL threads hit the barrier. */
    if (row < N && col < M)
        input1Temp[row][col] = *((int *)((char *)d_a + row * pitch_a) + col);
    if (row < M && col < N)
        input2Temp[row][col] = *((int *)((char *)d_b + row * pitch_b) + col);
    __syncthreads();

    if (row < N && col < N) {
        int tmp = 0;
        for (int i = 0; i < M; i++)
            tmp += input1Temp[row][i] * input2Temp[i][col];
        *((int *)((char *)d_c + row * pitch_c) + col) = tmp;
    }
}

int main()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);

    const int N = 4;
    const int M = 3;

    /* three streams so the host-to-device copies can overlap */
    cudaStream_t stream_a, stream_b, stream_c;
    cudaStreamCreate(&stream_a);
    cudaStreamCreate(&stream_b);
    cudaStreamCreate(&stream_c);

    /* output matrix, N x N */
    static int h_c[N][N];
    int *d_c;
    size_t pitch_c;
    cudaMallocPitch(&d_c, &pitch_c, N * sizeof(int), N);
    cudaMemcpy2DAsync(d_c, pitch_c, h_c, N * sizeof(int), N * sizeof(int), N,
                      cudaMemcpyHostToDevice, stream_c);

    /* input A, N x M */
    int h_a[N][M] = { { 1,2,3 },{ 4,5,6 },{ 7,8,9 },{ 1,3,4 } };
    size_t pitch_a;
    int *d_a;
    cudaMallocPitch(&d_a, &pitch_a, M * sizeof(int), N);
    cudaMemcpy2DAsync(d_a, pitch_a, h_a, M * sizeof(int), M * sizeof(int), N,
                      cudaMemcpyHostToDevice, stream_a);

    /* input B, M x N */
    int h_b[M][N] = { { 1,2,3,4 },{ 4,5,6,7 },{ 7,8,9,10 } };
    size_t pitch_b;
    int *d_b;
    cudaMallocPitch(&d_b, &pitch_b, N * sizeof(int), M);
    cudaMemcpy2DAsync(d_b, pitch_b, h_b, N * sizeof(int), N * sizeof(int), M,
                      cudaMemcpyHostToDevice, stream_b);

    /* ensure data copy is completed before the launch */
    cudaStreamSynchronize(stream_a);
    cudaStreamSynchronize(stream_b);
    cudaStreamSynchronize(stream_c);

    dim3 blockSize(1);
    dim3 threadSize(N, N);
    matrixMultiply<<<blockSize, threadSize>>>(d_a, pitch_a, d_b, pitch_b,
                                              d_c, pitch_c, N, M);
    cudaDeviceSynchronize();

    /* copy result to host, honoring the device pitch */
    cudaMemcpy2D(h_c, N * sizeof(int), d_c, pitch_c, N * sizeof(int), N,
                 cudaMemcpyDeviceToHost);

    for (size_t i = 0; i < N; i++) {
        for (size_t j = 0; j < N; j++)
            cout << h_c[i][j] << ", ";
        cout << endl;
    }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaStreamDestroy(stream_a);
    cudaStreamDestroy(stream_b);
    cudaStreamDestroy(stream_c);
    return 0;
}
3,782
#include<iostream> #include<cmath> #include<thrust/host_vector.h> using namespace std; void populate_ac_angles(float* ac_angles, int num_actions); void populate_ac_angles(float* ac_angles, int num_actions){ //fills array with equally spaced angles in radians for (int i = 0; i < num_actions; i++) ac_angles[i] = i*(2*M_PI)/num_actions; } int main(){ int num_actions = 8; thrust::host_vector<float> H_ac_angles(num_actions); float* ac_angles = thrust::raw_pointer_cast(&H_ac_angles[0]); //TODO: write function populate_ac_angles populate_ac_angles(ac_angles, num_actions); for (int i = 0; i < num_actions; i++) cout << H_ac_angles[i] << endl; }
3,783
#include "includes.h"

/* Per-block histogram with BIN_COUNT bins, reduced into `histo` with one
   atomicAdd per bin per block.  Each thread keeps a private column of
   counters in shared memory to avoid shared-memory atomics.
   Launch with blockDim.x == BIN_COUNT. */
__global__ void histKernel(char *inData, long size, unsigned int *histo)
{
    __shared__ unsigned int temp[BIN_COUNT][BIN_COUNT];   /* [bin][thread] */
    __shared__ unsigned int blockSum[BIN_COUNT];          /* per-bin totals */

    /* zero this thread's counter column and its partial-sum slot */
    int i = 0;
    while (i < BIN_COUNT)
        temp[i++][threadIdx.x] = 0;
    blockSum[threadIdx.x] = 0;   /* BUG FIX: was accumulated uninitialized */
    __syncthreads();

    /* grid-stride over the input, counting into the private column.
       Cast through unsigned char so bytes > 127 don't produce a negative
       index when char is signed (assumes BIN_COUNT covers the byte range
       — verify against the includes.h definition). */
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = blockDim.x * gridDim.x;
    while (tid < size) {
        temp[(unsigned char)inData[tid]][threadIdx.x]++;
        tid += offset;
    }
    __syncthreads();

    /* thread t sums bin t across all per-thread columns */
    i = 0;
    while (i < BIN_COUNT)
        blockSum[threadIdx.x] += temp[threadIdx.x][i++];
    __syncthreads();

    atomicAdd(&(histo[threadIdx.x]), blockSum[threadIdx.x]);
}
3,784
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

/* Single-thread kernel: *d_c = *d_a + *d_b (each pointer refers to one int). */
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
    *d_c = *d_a + *d_b;
}

int main()
{
    /* host operands and result */
    int h_a = 1, h_b = 4, h_c;
    /* device-side mirrors */
    int *d_a, *d_b, *d_c;
    cudaError_t cudaStatus;

    /* Allocate device storage (result buffer first, as before). */
    cudaStatus = cudaMalloc((void**)&d_c, sizeof(int));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }
    cudaStatus = cudaMalloc((void**)&d_a, sizeof(int));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }
    cudaStatus = cudaMalloc((void**)&d_b, sizeof(int));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }

    /* Push both operands to the device. */
    cudaStatus = cudaMemcpy(d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }
    cudaStatus = cudaMemcpy(d_b, &h_b, sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }

    /* A scalar add only needs one block with one thread. */
    gpuAdd<<<1, 1>>>(d_a, d_b, d_c);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    /* Fetch the result (the blocking memcpy also synchronizes the kernel). */
    cudaStatus = cudaMemcpy(&h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }

    printf("Passing Parameter by Reference Output: %d + %d = %d\n", h_a, h_b, h_c);

Error:
    cudaFree(d_c);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
3,785
/*
 College  : University of Massachusetts Lowell
 Course   : EECE 7110: High-Performance Comp. on GPUs
 Semester : Spring 2018
 Student  : 01639617
 Project  : Assignment_2
 Professor: Dr. Hang Liu
 Due date : 2/12/2017
 Authors  : Sai Sri Devesh Kadambari
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
using namespace std;

#define zero 0

/* Each block owns m/gridDim.x rows; every thread multiplies one column
   element of its row into shared memory, then the block tree-reduces the
   partial products.  The `f`/`k_b` bookkeeping folds in the leftover
   element when a reduction level has odd length. */
__global__ void gpu_matrix_mult(int *a, int *b, int *c, int m, int n, int k)
{
    __shared__ int smem[250];

    int row = blockIdx.x;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int step = m / gridDim.x;               /* rows handled per block */
    int index_begin = row * step;
    int index_end = (row + 1) * step;
    float f = (blockDim.x) / 2;             /* running (fractional) half-width */
    int k_b;

    __syncthreads();                        /* align all threads before the row loop */

    for (int i = index_begin; i < index_end; i++) {
        /* one product per thread for this row */
        smem[tid] = a[i * (blockDim.x) + tid] * b[tid];
        __syncthreads();

        /* tree reduction with odd-length fix-up */
        for (int j = (blockDim.x) / 2; j > 0; j = ceilf(f)) {
            k_b = 2 * f;
            if (((k_b) % 2 != 0) && (threadIdx.x == (j - 1))) {
                /* odd level: fold the dangling last element into its neighbour */
                smem[threadIdx.x - 1] += smem[threadIdx.x];
                j = j - 1;
                f = j;
            }
            if (threadIdx.x < j) {
                int temp = smem[threadIdx.x] + smem[threadIdx.x + j];
                smem[threadIdx.x] = temp;
            }
            __syncthreads();
            f = f / 2;
        }
        c[i] = smem[zero];
    }
}

int main(int argc, char const *argv[])
{
    int m, n, k;
    printf("please type in m=A_rows n=A_columns and k=B_columns \n");
    scanf("%d %d %d", &m, &n, &k);

    /* pinned host buffers */
    int *h_a, *h_b, *h_c;
    cudaMallocHost((void **)&h_a, sizeof(int) * m * n);
    cudaMallocHost((void **)&h_b, sizeof(int) * n * k);
    cudaMallocHost((void **)&h_c, sizeof(int) * m * k);

    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
            h_a[i * n + j] = rand() % 1024;

    for (int i = 0; i < n; ++i)
        for (int j = 0; j < k; ++j)
            h_b[i * k + j] = rand() % 1024;

    float gpu_elapsed_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    clock_t t = clock();

    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, sizeof(int) * m * n);
    cudaMalloc((void **)&d_b, sizeof(int) * n * k);
    cudaMalloc((void **)&d_c, sizeof(int) * m * k);
    cudaMemcpy(d_a, h_a, sizeof(int) * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int) * n * k, cudaMemcpyHostToDevice);

    dim3 dimGrid(128);
    dim3 dimBlock(256);
    gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);

    cudaMemcpy(h_c, d_c, sizeof(int) * m * k, cudaMemcpyDeviceToHost);
    cudaThreadSynchronize();

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    t = clock() - t;
    double time_taken = ((double)t) / CLOCKS_PER_SEC;
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %lf ms.\n\n",
           m, n, n, k, (time_taken / 1000));

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    return 0;
}
3,786
#include <stdio.h>
#include <cuda_runtime.h>

/**
 * CUDA Kernel Device code — an empty placeholder; launching it simply
 * exercises the CUDA runtime without doing any device work.
 */
__global__ void kernel(void) {}

/**
 * Host main routine: launch the no-op kernel, then greet from the host.
 */
int main(void)
{
    kernel<<<1, 1>>>();
    printf("Hello World\n");
    return 0;
}
3,787
// tests cuEventCreate
#include <iostream>
#include <memory>
using namespace std;

#include <cuda.h>

/* Adds `value` to every element; deliberately serial per thread so the
   kernel runs long enough to observe stream ordering. */
__global__ void longKernel(float *data, int N, float value)
{
    for (int i = 0; i < N; i++) {
        data[i] += value;
    }
}

/* Queue two kernels on one stream with a recorded event (and a wait on
   that event) between them, then synchronize the stream. */
void test1()
{
    int N = 102400;

    CUstream stream;
    cuStreamCreate(&stream, 0);
    cout << "got stream" << endl;

    float *hostFloats;
    cuMemHostAlloc((void **)&hostFloats, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);

    CUdeviceptr deviceFloats;
    cuMemAlloc(&deviceFloats, N * sizeof(float));

    longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1), 0, stream>>>((float *)deviceFloats, N, 3.0f);
    cout << "queued kernel 1" << endl;

    CUevent event;
    cuEventCreate(&event, CU_EVENT_DISABLE_TIMING);
    cuEventRecord(event, stream);
    cuStreamWaitEvent(stream, event, 0);

    longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1), 0, stream>>>((float *)deviceFloats, N, 3.0f);
    cout << "queued kernel 2" << endl;

    // cuCtxSynchronize();
    cuStreamSynchronize(stream);
    cout << "finished" << endl;

    cuEventDestroy(event);
    cuMemFreeHost(hostFloats);
    cuMemFree(deviceFloats);
    cuStreamDestroy(stream);
}

/* Print the first N entries of M on one line. */
void dump(float *M, int N)
{
    for (int row = 0; row < N; row++) {
        cout << " " << M[row];
    }
    cout << endl;
}

/* Set the first N entries of M to val. */
void fill(float *M, int N, float val)
{
    for (int row = 0; row < N; row++) {
        M[row] = val;
    }
}

/* Async H2D copy, long kernel, async D2H copy, all on one stream:
   after the stream sync, the returned values should, in theory, be
   the post-kernel results. */
void test2()
{
    int N = 102400;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float hostFloats[N];
    CUdeviceptr deviceFloats;
    cout << "call cumemalloc" << endl;
    cuMemAlloc(&deviceFloats, N * sizeof(float));
    cout << "cumemalloc done" << endl;

    fill(hostFloats, 10, 123);
    dump(hostFloats, 10);

    cout << "calling cuMemcpyHtoDAsync" << endl;
    cuMemcpyHtoDAsync((CUdeviceptr)(((float *)deviceFloats)), hostFloats, N * sizeof(float), stream);
    cout << "cuMemcpyHtoDAsync done" << endl;

    longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1), 0, stream>>>((float *)deviceFloats, N, 3.0f);
    cout << "queued kernel" << endl;

    cuMemcpyDtoHAsync(hostFloats, (CUdeviceptr)((float *)deviceFloats), N * sizeof(float), stream);
    cout << "queued async copy" << endl;

    cuStreamSynchronize(stream);
    dump(hostFloats, 10);

    cuMemFree(deviceFloats);
    cuStreamDestroy(stream);
}

int main(int argc, char *argv[])
{
    // NOTE(review): driver-API calls are made with no explicit cuInit(0) or
    // current context; presumably the runtime-API kernel launches create the
    // primary context first — confirm on the target setup.
    cout << "test1" << endl;
    test1();
    cout << "test2" << endl;
    test2();
    return 0;
}
3,788
#include <cstdio>
#include <iostream>

const int size = 5;

// Element-wise sum of two size x size matrices stored row-major.
__global__ void add(int *a, int *b, int *c)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < size && y < size) {
        int idx = x + size * y;
        c[idx] = a[idx] + b[idx];
    }
}

int main()
{
    int a[size][size], b[size][size], c[size][size];
    int *g_a, *g_b, *g_c;

    // a is all 1s (with one distinguished cell), b is all 2s
    for (int r = 0; r < size; r++) {
        for (int col = 0; col < size; col++) {
            a[r][col] = 1;
            b[r][col] = 2;
        }
    }
    a[0][3] = 4;

    const size_t bytes = sizeof(int) * size * size;
    cudaMalloc((void**)&g_a, bytes);
    cudaMalloc((void**)&g_b, bytes);
    cudaMalloc((void**)&g_c, bytes);
    cudaMemcpy(g_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(g_b, b, bytes, cudaMemcpyHostToDevice);

    // round the grid up so it always covers the whole matrix
    dim3 threadsPerBlock(16, 16);
    dim3 blocksPerGrid((size - 1) / threadsPerBlock.x + 1,
                       (size - 1) / threadsPerBlock.y + 1);
    add<<<blocksPerGrid, threadsPerBlock>>>(g_a, g_b, g_c);

    cudaMemcpy(c, g_c, bytes, cudaMemcpyDeviceToHost);

    for (int r = 0; r < size; r++) {
        for (int col = 0; col < size; col++)
            printf("%d ", c[r][col]);
        puts("");
    }
    return 0;
}
3,789
#include <cstddef>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>

#define BLOCK_SIZE 32
#define BIG_BLOCK 64

const int INF = ((1 << 30) - 1);

/* Blocked Floyd-Warshall, phase 1: relax the round-th diagonal (pivot)
   BIG_BLOCK x BIG_BLOCK tile in shared memory.  Each thread owns a 2x2
   sub-tile; numOfVertex must be a multiple of BIG_BLOCK (host pads it).
   NOTE(review): the k-loop reads entries other threads may be updating
   in the same iteration without a per-k barrier — confirm results
   against a CPU reference. */
__global__ void cal_phase1(int* Dist, int numOfVertex, int round)
{
    int newDist;
    int big_ty = threadIdx.y * 2;
    int big_tx = threadIdx.x * 2;
    int i = BIG_BLOCK * round + big_ty;
    int j = BIG_BLOCK * round + big_tx;

    __shared__ int smem_pivot_dist[BIG_BLOCK][BIG_BLOCK];
    smem_pivot_dist[big_ty][big_tx] = Dist[i * numOfVertex + j];
    smem_pivot_dist[big_ty + 1][big_tx] = Dist[(i + 1) * numOfVertex + j];
    smem_pivot_dist[big_ty][big_tx + 1] = Dist[i * numOfVertex + j + 1];
    smem_pivot_dist[big_ty + 1][big_tx + 1] = Dist[(i + 1) * numOfVertex + j + 1];
    __syncthreads();

#pragma unroll
    for (int k = 0; k < BIG_BLOCK; k++) {
        newDist = smem_pivot_dist[big_ty][k] + smem_pivot_dist[k][big_tx];
        if (newDist < smem_pivot_dist[big_ty][big_tx])
            smem_pivot_dist[big_ty][big_tx] = newDist;
        newDist = smem_pivot_dist[big_ty + 1][k] + smem_pivot_dist[k][big_tx];
        if (newDist < smem_pivot_dist[big_ty + 1][big_tx])
            smem_pivot_dist[big_ty + 1][big_tx] = newDist;
        newDist = smem_pivot_dist[big_ty][k] + smem_pivot_dist[k][big_tx + 1];
        if (newDist < smem_pivot_dist[big_ty][big_tx + 1])
            smem_pivot_dist[big_ty][big_tx + 1] = newDist;
        newDist = smem_pivot_dist[big_ty + 1][k] + smem_pivot_dist[k][big_tx + 1];
        if (newDist < smem_pivot_dist[big_ty + 1][big_tx + 1])
            smem_pivot_dist[big_ty + 1][big_tx + 1] = newDist;
    }
    __syncthreads();

    Dist[i * numOfVertex + j] = smem_pivot_dist[big_ty][big_tx];
    Dist[(i + 1) * numOfVertex + j] = smem_pivot_dist[big_ty + 1][big_tx];
    Dist[i * numOfVertex + j + 1] = smem_pivot_dist[big_ty][big_tx + 1];
    Dist[(i + 1) * numOfVertex + j + 1] = smem_pivot_dist[big_ty + 1][big_tx + 1];
}

/* Phase 2: relax all tiles in the pivot row (blockIdx.y == 0) and pivot
   column (blockIdx.y == 1) against the pivot tile from phase 1. */
__global__ void cal_phase2(int* Dist, int numOfVertex, int round)
{
    if (blockIdx.x == round) {
        return;   /* the pivot tile itself was handled in phase 1 */
    }

    int big_ty = threadIdx.y * 2;
    int big_tx = threadIdx.x * 2;
    int i = BIG_BLOCK * round + big_ty;
    int j = BIG_BLOCK * round + big_tx;
    int newDist;
    int shortestDist00, shortestDist01, shortestDist10, shortestDist11;

    __shared__ int smem_pivot_dist[BIG_BLOCK][BIG_BLOCK];
    __shared__ int smem_current_dist[BIG_BLOCK][BIG_BLOCK];

    smem_pivot_dist[big_ty][big_tx] = Dist[i * numOfVertex + j];
    smem_pivot_dist[big_ty + 1][big_tx] = Dist[(i + 1) * numOfVertex + j];
    smem_pivot_dist[big_ty][big_tx + 1] = Dist[i * numOfVertex + j + 1];
    smem_pivot_dist[big_ty + 1][big_tx + 1] = Dist[(i + 1) * numOfVertex + j + 1];
    __syncthreads();

    if (blockIdx.y == 0) {          /* row tile */
        i = BIG_BLOCK * round + big_ty;
        j = BIG_BLOCK * blockIdx.x + big_tx;
    } else {                        /* column tile */
        i = BIG_BLOCK * blockIdx.x + big_ty;
        j = BIG_BLOCK * round + big_tx;
    }

    smem_current_dist[big_ty][big_tx] = Dist[i * numOfVertex + j];
    smem_current_dist[big_ty + 1][big_tx] = Dist[(i + 1) * numOfVertex + j];
    smem_current_dist[big_ty][big_tx + 1] = Dist[i * numOfVertex + j + 1];
    smem_current_dist[big_ty + 1][big_tx + 1] = Dist[(i + 1) * numOfVertex + j + 1];

    shortestDist00 = smem_current_dist[big_ty][big_tx];
    shortestDist10 = smem_current_dist[big_ty + 1][big_tx];
    shortestDist01 = smem_current_dist[big_ty][big_tx + 1];
    shortestDist11 = smem_current_dist[big_ty + 1][big_tx + 1];
    __syncthreads();

    if (blockIdx.y == 0) {
#pragma unroll
        for (int k = 0; k < BIG_BLOCK; k++) {
            newDist = smem_pivot_dist[big_ty][k] + smem_current_dist[k][big_tx];
            shortestDist00 = min(shortestDist00, newDist);
            newDist = smem_pivot_dist[big_ty + 1][k] + smem_current_dist[k][big_tx];
            shortestDist10 = min(shortestDist10, newDist);
            newDist = smem_pivot_dist[big_ty][k] + smem_current_dist[k][big_tx + 1];
            shortestDist01 = min(shortestDist01, newDist);
            newDist = smem_pivot_dist[big_ty + 1][k] + smem_current_dist[k][big_tx + 1];
            shortestDist11 = min(shortestDist11, newDist);
        }
    } else {
#pragma unroll
        for (int k = 0; k < BIG_BLOCK; k++) {
            newDist = smem_current_dist[big_ty][k] + smem_pivot_dist[k][big_tx];
            shortestDist00 = min(shortestDist00, newDist);
            newDist = smem_current_dist[big_ty + 1][k] + smem_pivot_dist[k][big_tx];
            shortestDist10 = min(shortestDist10, newDist);
            newDist = smem_current_dist[big_ty][k] + smem_pivot_dist[k][big_tx + 1];
            shortestDist01 = min(shortestDist01, newDist);
            newDist = smem_current_dist[big_ty + 1][k] + smem_pivot_dist[k][big_tx + 1];
            shortestDist11 = min(shortestDist11, newDist);
        }
    }
    __syncthreads();

    Dist[i * numOfVertex + j] = shortestDist00;
    Dist[(i + 1) * numOfVertex + j] = shortestDist10;
    Dist[i * numOfVertex + j + 1] = shortestDist01;
    Dist[(i + 1) * numOfVertex + j + 1] = shortestDist11;
}

/* Phase 3: relax every remaining tile against its row-pivot and
   column-pivot tiles produced in phase 2.  The current tile is
   accumulated in registers. */
__global__ void cal_phase3(int* Dist, int numOfVertex, int round)
{
    int big_ty = threadIdx.y * 2;
    int big_tx = threadIdx.x * 2;
    int i, j;
    int newDist;
    int shortestDist00, shortestDist01, shortestDist10, shortestDist11;

    __shared__ int smem_row_pivot_dist[BIG_BLOCK][BIG_BLOCK];
    __shared__ int smem_column_pivot_dist[BIG_BLOCK][BIG_BLOCK];

    /* row-pivot tile (round, blockIdx.x) */
    i = BIG_BLOCK * round + big_ty;
    j = BIG_BLOCK * blockIdx.x + big_tx;
    smem_row_pivot_dist[big_ty][big_tx] = Dist[i * numOfVertex + j];
    smem_row_pivot_dist[big_ty + 1][big_tx] = Dist[(i + 1) * numOfVertex + j];
    smem_row_pivot_dist[big_ty][big_tx + 1] = Dist[i * numOfVertex + j + 1];
    smem_row_pivot_dist[big_ty + 1][big_tx + 1] = Dist[(i + 1) * numOfVertex + j + 1];

    /* column-pivot tile (blockIdx.y, round) */
    i = BIG_BLOCK * blockIdx.y + big_ty;
    j = BIG_BLOCK * round + big_tx;
    smem_column_pivot_dist[big_ty][big_tx] = Dist[i * numOfVertex + j];
    smem_column_pivot_dist[big_ty + 1][big_tx] = Dist[(i + 1) * numOfVertex + j];
    smem_column_pivot_dist[big_ty][big_tx + 1] = Dist[i * numOfVertex + j + 1];
    smem_column_pivot_dist[big_ty + 1][big_tx + 1] = Dist[(i + 1) * numOfVertex + j + 1];

    /* current tile (blockIdx.y, blockIdx.x), kept in registers */
    i = BIG_BLOCK * blockIdx.y + big_ty;
    j = BIG_BLOCK * blockIdx.x + big_tx;
    shortestDist00 = Dist[i * numOfVertex + j];
    shortestDist10 = Dist[(i + 1) * numOfVertex + j];
    shortestDist01 = Dist[i * numOfVertex + j + 1];
    shortestDist11 = Dist[(i + 1) * numOfVertex + j + 1];
    __syncthreads();

#pragma unroll
    for (int k = 0; k < BIG_BLOCK; k++) {
        newDist = smem_column_pivot_dist[big_ty][k] + smem_row_pivot_dist[k][big_tx];
        shortestDist00 = min(shortestDist00, newDist);
        newDist = smem_column_pivot_dist[big_ty + 1][k] + smem_row_pivot_dist[k][big_tx];
        shortestDist10 = min(shortestDist10, newDist);
        newDist = smem_column_pivot_dist[big_ty][k] + smem_row_pivot_dist[k][big_tx + 1];
        shortestDist01 = min(shortestDist01, newDist);
        newDist = smem_column_pivot_dist[big_ty + 1][k] + smem_row_pivot_dist[k][big_tx + 1];
        shortestDist11 = min(shortestDist11, newDist);
    }
    __syncthreads();

    Dist[i * numOfVertex + j] = shortestDist00;
    Dist[(i + 1) * numOfVertex + j] = shortestDist10;
    Dist[i * numOfVertex + j + 1] = shortestDist01;
    Dist[(i + 1) * numOfVertex + j + 1] = shortestDist11;
}

/* Host driver: copy Dist to the device, run the three phases for each of
   the numOfVertex/BIG_BLOCK rounds, copy the result back. */
void block_FW(int* Dist, int numOfVertex)
{
    cudaError_t status;
    int* devMem_Dist;

    status = cudaMalloc((void**)&devMem_Dist, numOfVertex * numOfVertex * sizeof(int));
    if (status != cudaSuccess) {
        exit(2);
    }
    status = cudaMemcpy(devMem_Dist, Dist, numOfVertex * numOfVertex * sizeof(int),
                        cudaMemcpyHostToDevice);
    if (status != cudaSuccess) {
        exit(3);
    }

    int round = numOfVertex / BIG_BLOCK;   /* numOfVertex is already padded */
    dim3 gridSize_phase1(1, 1);
    dim3 blockSize_phase1(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gridSize_phase2(numOfVertex / BIG_BLOCK, 2);
    dim3 blockSize_phase2(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gridSize_phase3(numOfVertex / BIG_BLOCK, numOfVertex / BIG_BLOCK);
    dim3 blockSize_phase3(BLOCK_SIZE, BLOCK_SIZE);

    for (int r = 0; r < round; ++r) {
        cal_phase1<<<gridSize_phase1, blockSize_phase1>>>(devMem_Dist, numOfVertex, r);
        cal_phase2<<<gridSize_phase2, blockSize_phase2>>>(devMem_Dist, numOfVertex, r);
        cal_phase3<<<gridSize_phase3, blockSize_phase3>>>(devMem_Dist, numOfVertex, r);
    }

    status = cudaDeviceSynchronize();
    if (status != cudaSuccess) {
        exit(4);
    }
    status = cudaMemcpy(Dist, devMem_Dist, numOfVertex * numOfVertex * sizeof(int),
                        cudaMemcpyDeviceToHost);
    if (status != cudaSuccess) {
        exit(5);
    }
    cudaFree(devMem_Dist);
}

int main(int argc, char* argv[])
{
    int numOfVertex, original_numOfVertex, numOfEdge, numOfPadding;

    /* --------------------------- Input --------------------------- */
    FILE* inFile = fopen(argv[1], "rb");
    fread(&numOfVertex, sizeof(int), 1, inFile);
    printf("The number of vertices: %d\n", numOfVertex);
    fread(&numOfEdge, sizeof(int), 1, inFile);

    /* pad the vertex count up to a multiple of BIG_BLOCK so the kernels
       never need bounds checks */
    numOfPadding = 0;
    if (numOfVertex % BIG_BLOCK != 0)
        numOfPadding = BIG_BLOCK - (numOfVertex % BIG_BLOCK);
    original_numOfVertex = numOfVertex;
    numOfVertex += numOfPadding;

    int* Dist = (int*)malloc(numOfVertex * numOfVertex * sizeof(int));
    int* shortestDist = (int*)malloc(original_numOfVertex * original_numOfVertex * sizeof(int));

    /* diagonal = 0 for real vertices, everything else starts at INF
       (padding rows stay INF even on the diagonal) */
    for (int i = 0; i < numOfVertex; ++i) {
        for (int j = 0; j < numOfVertex; ++j) {
            if (i == j && i < numOfVertex - numOfPadding) {
                Dist[i * numOfVertex + j] = 0;
            } else {
                Dist[i * numOfVertex + j] = INF;
            }
        }
    }

    int pair[3];
    for (int i = 0; i < numOfEdge; ++i) {
        fread(pair, sizeof(int), 3, inFile);
        Dist[pair[0] * numOfVertex + pair[1]] = pair[2];
    }

    /* ------------------------- Calculate ------------------------- */
    block_FW(Dist, numOfVertex);

    /* clamp any overflowed sums back to INF */
    FILE* outFile = fopen(argv[2], "wb");
    for (int i = 0; i < numOfVertex; ++i) {
        for (int j = 0; j < numOfVertex; ++j) {
            if (Dist[i * numOfVertex + j] >= INF)
                Dist[i * numOfVertex + j] = INF;
        }
    }

    /* strip the padding before writing the result */
    for (int i = 0; i < original_numOfVertex; i++) {
        for (int j = 0; j < original_numOfVertex; j++) {
            shortestDist[i * original_numOfVertex + j] = Dist[i * numOfVertex + j];
        }
    }

    /* --------------------------- Output -------------------------- */
    fwrite(shortestDist, sizeof(int), original_numOfVertex * original_numOfVertex, outFile);
    fclose(inFile);
    fclose(outFile);

    /* BUG FIX: these buffers came from malloc(), so release them with
       free() — delete[] on malloc'd memory is undefined behavior. */
    free(Dist);
    free(shortestDist);
    return 0;
}
3,790
#include "includes.h"

/* Stream-compact the non-negative entries of `src` into `dst`.
   `prefix_sum[p]` must already hold the output slot for element p. */
__global__ static void pack(const int* prefix_sum, const int* src, int* dst, const int nb_vert)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= nb_vert)
        return;
    const int value = src[idx];
    if (value >= 0)
        dst[prefix_sum[idx]] = value;
}
3,791
/**
 * Reference pattern: for a kernel over an n-columns x m-rows matrix, how to
 * recover the (row, col) a thread owns and its index into the linearized
 * storage. See also http://en.wikipedia.org/wiki/Row-major_order
 */
__global__ void SomeKernel(float * d_in, float * d_out, int cols, int rows)
{
    int colIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int rowIdx = blockIdx.y * blockDim.y + threadIdx.y;

    // More threads than elements may have been launched, so discard the
    // out-of-range ones before touching memory.
    if ((rowIdx < rows) && (colIdx < cols)) {
        // An NxM (cols x rows) matrix is linearized into one contiguous
        // address space; Row*Width + Col converts the 2-D coordinate into
        // the index used to access d_in/d_out.
        int idx = rowIdx * cols + colIdx;
    }
}
3,792
//#pragma once //#include "cuda_runtime.h" //#include "vector_operations.cuh" // //__device__ float3 operator+(float3 f1, float3 f2) //{ // return make_float3(f1.x + f2.x, f1.y + f2.y, f1.z + f2.z); //} // //__device__ float3 operator-(float3 f1, float3 f2) //{ // return make_float3(f1.x - f2.x, f1.y - f2.y, f1.z - f2.z); //} // //__device__ float3 operator*(float3 f1, float a) //{ // return make_float3(f1.x *a, f1.y *a, f1.z *a); //} // //__device__ float4 operator+(float4 f1, float4 f2) //{ // return make_float4(f1.x + f2.x, f1.y + f2.y, f1.z + f2.z, f1.w + f2.w); //} // //__device__ float4 operator-(float4 f1, float4 f2) //{ // return make_float4(f1.x - f2.x, f1.y - f2.y, f1.z - f2.z, f1.w - f2.w); //} // //__device__ float4 operator*(float4 f1, float a) //{ // return make_float4(f1.x *a, f1.y *a, f1.z *a, f1.w *a); //} // //__device__ float magnitudeSqr(const float3 vector) //{ // return (vector.x * vector.x) + (vector.y * vector.y) + (vector.z * vector.z); //} // //__device__ float magnitudeSqr(const float4 vector) //{ // return (vector.x * vector.x) + (vector.y * vector.y) + (vector.z * vector.z) + (vector.w * vector.w); //} // // // //__device__ void float3Scale(float3* v, float a) //{ // (*v) = (*v) * a; //} // //__device__ void float3Sub(float3* V, const float3* v1, const float3* v2) //{ // (*V) = (*v1) - (*v2); //} // //__device__ void float3Add(float3* V, const float3* v1, const float3* v2) //{ // (*V) = (*v1) + (*v2); //} // //__device__ float float3Dot(const float3* v1, const float3* v2) //{ // float dot = 0.0f; // dot = v1->x * v2->x; // dot += v1->y * v2->y; // dot += v1->z * v2->z; // return dot; //} // //__device__ void float3Cross(float3* d, const float3* a, const float3* b) //{ // d->x = (a->y * b->z) - (a->z * b->y); // d->y = (a->z * b->x) - (a->x * b->z); // d->z = (a->x * b->y) - (a->y * b->x); // //} // // //__device__ void float4Copy(float4* V, const float4* v1) //{ // V->x = v1->x; // V->y = v1->y; // V->z = v1->z; // V->w = v1->w; //} // 
//__device__ void float3Copy(float3* V, const float3* v1) //{ // V->x = v1->x; // V->y = v1->y; // V->z = v1->z; //} // //__device__ float float3Len(float3 *v) //{ // return float3Dot(v, v); //} // // //__device__ bool float3Eq(const float3 *v1, const float3 *v2) //{ // return (v1->x == v2->x) && // (v1->y == v2->y) && // (v1->z == v2->z); //} // //__device__ float float3Dist(const float3 * v1, const float3 * v2) //{ // float3 res = (*v1) - (*v2); // return float3Len(&(res)); //}
3,793
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

/**
 * @brief Prints an m x n column-major matrix stored in host memory,
 *        one labeled entry per line, using 1-based indices.
 *
 * @param m    number of rows
 * @param n    number of columns
 * @param A    host pointer to matrix data (column-major)
 * @param lda  leading dimension of A (>= m)
 * @param name label printed before every entry
 */
void printMatrix(int m, int n, const float *A, int lda, const char *name)
{
    for (int row = 0; row < m; row++)
    {
        for (int col = 0; col < n; col++)
        {
            float Areg = A[row + col * lda];
            printf("%s(%d,%d) = %.3f\n", name, row + 1, col + 1, Areg);
        }
    }
}

/**
 * @brief Prints m x n matrix A on host memory (row-major, 0-based indices).
 */
void print_cpu_matrix(int m, int n, const float *A)
{
    for (int row = 0; row < m; row++)
    {
        for (int col = 0; col < n; col++)
        {
            float Areg = A[col + row * n];
            printf("(%d,%d)%.3f,", row, col, Areg);
        }
        printf("\n");
    }
}

/**
 * @brief Prints m x n matrix A living in device memory by copying it to a
 *        temporary host buffer first.
 *
 * FIX: the temporary buffer was leaked in the original; allocation and
 * copy results are now checked and the buffer is freed.
 */
void print_device_matrix(int m, int n, const float *A)
{
    float *tempmatrix = (float *)malloc(sizeof(float) * m * n);
    if (tempmatrix == NULL)
    {
        printf("print_device_matrix: host allocation failed\n");
        return;
    }
    cudaError_t err = cudaMemcpy(tempmatrix, A, sizeof(float) * m * n, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        printf("print_device_matrix: %s\n", cudaGetErrorString(err));
        free(tempmatrix);
        return;
    }
    for (int row = 0; row < m; row++)
    {
        for (int col = 0; col < n; col++)
        {
            float Areg = tempmatrix[col + row * n];
            printf("(%d,%d)%.3f,", row, col, Areg);
        }
        printf("\n");
    }
    free(tempmatrix);   /* was leaked in the original */
}

/**
 * @brief Prints an m-element vector living in device memory.
 *
 * FIX: the temporary buffer was leaked in the original.
 */
void print_device_vector(int m, const float *A)
{
    float *tempmatrix = (float *)malloc(sizeof(float) * m);
    if (tempmatrix == NULL)
    {
        printf("print_device_vector: host allocation failed\n");
        return;
    }
    cudaError_t err = cudaMemcpy(tempmatrix, A, sizeof(float) * m, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        printf("print_device_vector: %s\n", cudaGetErrorString(err));
        free(tempmatrix);
        return;
    }
    for (int row = 0; row < m; row++)
    {
        float Areg = tempmatrix[row];
        printf("(%d)%.3f,", row, Areg);
    }
    printf("\n");
    free(tempmatrix);   /* was leaked in the original */
}

/**
 * @brief Prints an m-element host vector, one labeled entry per line.
 *
 * FIX: the original printed every element twice (an unlabeled "%.6f" line
 * followed by the labeled line) — the first looked like leftover debug
 * output and has been removed.
 */
void printVector(int m, const float *A, const char *name)
{
    for (int i = 0; i < m; i++)
    {
        float Areg = A[i];
        printf("%s(%d) = %.3f\n", name, i, Areg);
    }
}
3,794
#include<stdio.h>
#include<iostream>

/* Prints selected capability fields from a device's cudaDeviceProp record. */
void printDeviceProp(cudaDeviceProp devProp){
    printf("Name: %s\n", devProp.name);
    printf("Maximum thread per block: %d\n", devProp.maxThreadsPerBlock);
    for(int i = 0; i < 3; i++)
        printf("Maximum dimension of block: %d\t %d\n",i, devProp.maxThreadsDim[i]);
    return;
}

/* SAXPY kernel: y[i] = a*x[i] + y[i], one thread per element, with the
 * standard bounds guard for the partially-filled last block.
 *
 * BUG FIX: the original called __syncthreads() inside the divergent
 * "if (i < n)" branch. No shared data is exchanged here, so the barrier was
 * unnecessary — and a barrier that only part of a block reaches is undefined
 * behavior. It has been removed. */
__global__ void saxpy(int n, float a, float *x, float *y){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n){
        y[i] = a*x[i] + y[i];
    }
}

int main(int argc, char* argv[]){
    int devCount;
    int N;
    float A;
    float* d_x;
    float* d_y;

    /* Enumerate CUDA devices and print their properties. */
    cudaGetDeviceCount(&devCount);
    printf("%d Cuda devices\n", devCount);
    for(int k = 0; k < devCount; ++k){
        printf("\nCuda Device %d\n",k);
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, k);
        printDeviceProp(devProp);
    }

    /* Read problem size and scalar from stdin. */
    printf("Size of array N: ");
    scanf("%d", &N);
    printf("Size of scalar value A: ");
    scanf("%f", &A);

    /* Host and device buffers. */
    float* h_x = (float*)malloc(N*sizeof(float));
    float* h_y = (float*)malloc(N*sizeof(float));
    cudaMalloc((void**)&d_x, N*sizeof(float));
    cudaMalloc((void**)&d_y, N*sizeof(float));

    for(int i=0; i < N; i++){
        /* Random values in [0, 256). */
        h_x[i] = (float) (rand() % 256);
        h_y[i] = (float) (rand() % 256);
        printf("x = %f\n",h_x[i]);
        printf("y = %f\n",h_y[i]);
    }

    /* Host -> device. */
    cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, N*sizeof(float), cudaMemcpyHostToDevice);

    int blocks = (N + 255)/256;   /* ceil(N / 256) */
    printf("Block number: %d\n",blocks);

    saxpy<<<blocks,256>>>(N, A, d_x, d_y);
    /* FIX: kernel launches return no status directly; check explicitly. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
    }

    /* Device -> host (blocking memcpy also synchronizes with the kernel). */
    cudaMemcpy(h_y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);

    for(int i=0; i < N; i++){
        printf("y = %f\n",h_y[i]);
    }

    cudaFree(d_x);
    cudaFree(d_y);
    free(h_x);
    free(h_y);
    return 0;
}
3,795
#include "includes.h"

// Element-wise fast negative exponential: out[i] = exp(-in[i]) using the
// __expf hardware intrinsic. One thread per element; threads whose global
// index falls past `size` return immediately.
__global__ void expMinus(float* out, float* in, int size){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    out[idx] = __expf(-in[idx]);
}
3,796
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>

/* Problem size */
#define M 1024
#define N 1024
#define FLOAT_N 3214212.01

/* Fills the (M+1)x(N+1) host matrix (1-based interior) with
 * data[i][j] = i*j / M. Row/column 0 are left untouched. */
void init_arrays(double* data)
{
    int i, j;
    for (i = 1; i < (M+1); i++) {
        for (j = 1; j < (N+1); j++) {
            data[i*(N+1) + j] = ((double) i*j) / M;
        }
    }
}

/* One thread per column j: mean[j] = sum_i data[i][j] / FLOAT_N.
 *
 * BUG FIX: the accumulator was declared __shared__, so every thread of the
 * block raced on a single shared variable and the results were garbage.
 * The accumulator must be thread-private (a register/local). Threads that
 * differ only in threadIdx.y compute the same j and store the same value,
 * which is redundant but harmless once the race is gone. */
__global__ void mean_kernel(double* data, double* mean)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x + 1;

    if (j < (M+1)) {
        double sum = 0.0;   /* was __shared__: data race across the block */
        int i;
        for (i = 1; i < (N+1); i++) {
            sum += data[i * (M+1) + j];
        }
        mean[j] = sum / FLOAT_N;
    }
}

/* One thread per interior element: subtract the column mean. */
__global__ void data_kernel(double* data, double* mean)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
    int i = blockIdx.y * blockDim.y + threadIdx.y + 1;

    if ((i < (N+1)) && (j < (M+1))) {
        data[i * (M+1) + j] -= mean[j];
    }
}

/* One thread per row j1: fill row j1 (and mirror column j1) of the symmetric
 * covariance matrix.
 *
 * BUG FIX: the per-pair accumulator was __shared__ and raced across the
 * block, exactly as in mean_kernel; it is now thread-private. */
__global__ void symmat_kernel(double* symmat, double* data)
{
    int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1;
    int i, j2;

    if (j1 < (M+1)) {
        for (j2 = j1; j2 < (M+1); j2++) {
            double acc = 0.0;   /* was __shared__: data race across the block */
            for (i = 1; i < (N+1); i++) {
                acc += data[i * (M+1) + j1] * data[i * (M+1) + j2];
            }
            symmat[j1 * (M+1) + j2] = acc;
            symmat[j2 * (M+1) + j1] = acc;
        }
    }
}

int main(int argc, char *argv[])
{
    double *data_h, *data_d;
    double *symmat_h, *symmat_d;
    double *mean_h, *mean_d;
    struct timeval cpu_start, cpu_end;

    data_h = (double*)malloc((M+1)*(N+1)*sizeof(double));
    mean_h = (double*)malloc((M+1)*sizeof(double));
    symmat_h = (double*)malloc((M+1)*(M+1)*sizeof(double));

    /* Device allocations for the three arrays. */
    cudaMalloc((void **) &data_d, (M+1)*(N+1)*sizeof(double));
    cudaMalloc((void **) &mean_d, (M+1)*sizeof(double));
    cudaMalloc((void **) &symmat_d, (M+1)*(M+1)*sizeof(double));
    cudaMemset(data_d, 0, (M+1)*(N+1)*sizeof(double));

    init_arrays(data_h);

    /* Copy the input data to the device. */
    cudaMemcpy(data_d, data_h, (M+1)*(N+1)*sizeof(double), cudaMemcpyHostToDevice);

    /*----------------------------------------------------------------------
     * Each block is 16x16 threads; only the x dimension matters for
     * kernels 1 and 3, both dimensions for kernel 2.
     *---------------------------------------------------------------------*/
    unsigned int BLOCK_SIZE_PER_DIM = 16;

    dim3 dimGrid1((M - 1) / BLOCK_SIZE_PER_DIM + 1, 1);
    dim3 dimGrid2((M - 1) / BLOCK_SIZE_PER_DIM + 1, (N - 1) / BLOCK_SIZE_PER_DIM + 1);
    dim3 dimGrid3((M - 1) / BLOCK_SIZE_PER_DIM + 1, 1);
    dim3 dimBlock(BLOCK_SIZE_PER_DIM, BLOCK_SIZE_PER_DIM, 1);

    gettimeofday(&cpu_start, NULL);

    mean_kernel<<<dimGrid1, dimBlock>>>(data_d, mean_d);
    cudaDeviceSynchronize();   /* FIX: cudaThreadSynchronize() is deprecated */
    data_kernel<<<dimGrid2, dimBlock>>>(data_d, mean_d);
    cudaDeviceSynchronize();
    symmat_kernel<<<dimGrid3, dimBlock>>>(symmat_d, data_d);
    cudaDeviceSynchronize();

    cudaMemcpy(symmat_h, symmat_d, (M+1)*(M+1)*sizeof(double), cudaMemcpyDeviceToHost);
    gettimeofday(&cpu_end, NULL);

    fprintf(stdout, "GPU Runtime: %0.6lfs\n", ((cpu_end.tv_sec - cpu_start.tv_sec) * 1000000.0 + (cpu_end.tv_usec - cpu_start.tv_usec)) / 1000000.0);
    printf("================================\n");

    FILE *f = fopen("ask3_cuda_output.txt", "w+");
    if (f == NULL) {
        printf("Error opening ask3_cuda_output.txt!\n");
        exit(1);
    }
    for(int i = 1; i < (M+1); i++) {
        for(int j = 1; j < (M+1); j++){
            fprintf(f, "%f\n", symmat_h[i * (M+1) + j]);
        }
    }
    if(f) {
        printf("Results saved in ask3_cuda_output.txt!\n");
    }
    fclose(f);

    /* Release host memory. */
    free(data_h);
    free(mean_h);
    free(symmat_h);

    /* Release device memory. */
    cudaFree(data_d);
    cudaFree(mean_d);
    cudaFree(symmat_d);

    return 0;
}
3,797
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

#define THREADS 16
#define BLOCKS 32
#define WIDTH (THREADS * BLOCKS)

int size = WIDTH * WIDTH * sizeof(float);   /* bytes per WIDTH x WIDTH matrix */
float *M, *N, *P;                           /* host matrices */
float *gpuM, *gpuN, *gpuP;                  /* device matrices */
time_t seed;

/* Selects device devNum and allocates the three device matrices. */
void initGPU(int devNum){
    cudaSetDevice(devNum);
    cudaMalloc((void**)&gpuM, size);
    cudaMalloc((void**)&gpuN, size);
    cudaMalloc((void**)&gpuP, size);
}

/* Fills `size` floats with uniform random values in [0, 1). */
void randomInit(float* data, int size){
    for (int i=0; i<size; i++)
        data[i] = drand48();
}

/* Reference O(n^3) host matrix multiply: P = M * N (row-major). */
void cpuMatrixMul(const float* M, const float* N, float* P){
    int i, j, k;
    float sum;
    for (i=0; i<WIDTH; i++)
        for (j=0; j<WIDTH; j++){
            sum = 0.0f;
            for (k=0; k<WIDTH; k++)
                sum += M[i * WIDTH + k] * N[k * WIDTH + j];
            P[i * WIDTH + j] = sum;
        }
}

/* One thread per output element of P = M * N.
 * FIX: index with blockDim.x/.y instead of the THREADS macro, so the kernel
 * stays correct if the launch configuration ever changes (identical result
 * for the current 16x16 blocks). */
__global__ void matrixMultKernel(float* M, float* N, float* P){
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * blockDim.y + threadIdx.y;
    float tmp = 0.0f;
    for (int k=0; k<WIDTH; k++)
        tmp += M[ty * WIDTH + k] * N[k * WIDTH + tx];
    P[ty * WIDTH + tx] = tmp;
}

int main(void){
    float msecTotal;
    cudaEvent_t start;
    cudaEvent_t stop;

    initGPU(0);
    time(&seed);
    srand48(seed);

    M = (float*)malloc(size);
    N = (float*)malloc(size);
    P = (float*)malloc(size);
    randomInit(M, WIDTH*WIDTH);
    randomInit(N, WIDTH*WIDTH);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* Time the CPU reference multiply. */
    cudaEventRecord(start, NULL);
    cpuMatrixMul(M, N, P);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("cpu time: %.3f ms\n", msecTotal);

    /* Time the GPU path, including host<->device transfers. */
    cudaEventRecord(start, NULL);
    cudaMemcpy(gpuM, M, size, cudaMemcpyHostToDevice);
    cudaMemcpy(gpuN, N, size, cudaMemcpyHostToDevice);
    dim3 numThread(THREADS, THREADS);
    dim3 numBlock(BLOCKS, BLOCKS);
    matrixMultKernel<<<numBlock, numThread>>>(gpuM, gpuN, gpuP);
    /* FIX: the launch itself returns no status; check for config errors. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("CUDA error: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    cudaMemcpy(P, gpuP, size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("gpu time: %.3f ms\n",msecTotal);

    /* FIX: destroy the timing events (leaked in the original). */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    free(M);
    free(N);
    free(P);
    cudaFree(gpuM);
    cudaFree(gpuN);
    cudaFree(gpuP);
    return 0;
}
3,798
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   /* FIX: memset was used without its header */
#include <cuda.h>

/* ASCII-art banner. The array is 480 bytes; bytes past the literal are
 * zero-filled, matching the original local array's semantics.
 * FIX: the original declared this inside the kernel, so every thread
 * materialized its own 480-byte local copy; at file scope in device memory
 * it is stored once. */
__device__ const char d_hello_str[480] = "#######################################\n _ _ _ \n | | | | | | \n | |__| | ___ _ __ __ _| | ___ ___ \n | __ |/ _ \\ '_ \\ / _` | |/ _ \\/ _ \\ \n | | | | __/ | | | (_| | | __/ __/ \n |_| |_|\\___|_| |_|\\__, |_|\\___|\\___| \n __/ | \n |___/ \n+++++++++++++++++++++++++++++++++++++++\n:::::::::::::::::::::::::::::::::::::::\n";

/* Copies one banner byte per thread into odata (num bytes total). */
__global__ void hello_kernel(char *odata, int num)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num)
        odata[idx] = d_hello_str[idx];
}

int main(void)
{
    char *h_data, *d_data;
    const int strlen = 480;
    size_t strsize = strlen * sizeof(char);

    h_data = (char *) malloc(strsize);
    memset(h_data, 0, strlen);

    cudaMalloc((void **) &d_data, strsize);
    /* Zero the device buffer (matches the original's behavior). */
    cudaMemcpy(d_data, h_data, strsize, cudaMemcpyHostToDevice);

    int blocksize = 8;
    int nblock = strlen/blocksize + (strlen % blocksize == 0 ? 0 : 1);   /* ceil-div */
    hello_kernel<<<nblock,blocksize>>>(d_data, strlen);
    /* FIX: surface launch-configuration errors explicitly. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("CUDA error: %s\n", cudaGetErrorString(err));

    cudaMemcpy(h_data, d_data, sizeof(char)*strlen, cudaMemcpyDeviceToHost);
    printf("%s\n", h_data);   /* trailing bytes are NUL, so %s is safe */

    free(h_data);
    cudaFree(d_data);
    return 0;
}
3,799
#include <stdio.h>
#include <stdlib.h>

#define SIZE 10

/*
 * Demonstration program: deliberately dereferences a DEVICE pointer from
 * host code. cudaMalloc returns an address in the GPU address space, so the
 * host-side write p[i] = 1 below is expected to crash (segmentation fault).
 * This file exists to exhibit the error, not to be "fixed" — the correct
 * way to fill device memory would be cudaMemcpy/cudaMemset or a kernel.
 */
int main(int argc , char **argv){
    int * p;
    cudaError_t err;
    /* Allocate SIZE ints in device (global) memory; check the status. */
    err=cudaMalloc((void**)&p,SIZE*sizeof(int));
    if( err != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    int i;
    for(i=0;i<SIZE;i++){
        //Accessing variables allocated on global memory in host function causes Segmentation fault.
        p[i]=1;   /* intentional fault: device pointer dereferenced on the host */
    }
    cudaFree(p);
    return 0;
}
3,800
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 32

extern "C" {

/* C[row][col] = sum_k A[row][k] * B[k][col] for n x n row-major matrices.
 * One thread per output element, guarded for the partially-filled edge
 * blocks.
 * FIX: the loop counter was "unsigned int" compared against the signed n;
 * it is now a plain int, removing the signed/unsigned mismatch. */
__global__ void mul_matrix(int *A, int *B, int *C, int n){
    int product = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    if(row < n && col < n){
        for (int i = 0; i < n; i++)
            product += A[row * n + i] * B[i * n + col];
        C[row*n + col] = product;
    }
}

/* Host wrapper: multiplies the N x N host matrices a_h * b_h into c_h on
 * device device_id. Returns N on success; exits on any CUDA error. */
int cuda_matrixMul(int *a_h, int *b_h, int *c_h, int N, int device_id){
    cudaError_t err;
    int *a_d, *b_d, *c_d;
    size_t size = N * N * sizeof (int);

    printf("C: device id >> %d\n", device_id);
    cudaSetDevice(device_id);

    /* Allocate the three device matrices, checking each call. */
    printf("C: Allocate GPU Memory1\n");
    err = cudaMalloc((void **) & a_d, size);
    if (err != cudaSuccess){
        printf("CUDA error(1): %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    printf("C: Allocate GPU Memory2\n");
    err = cudaMalloc((void **) & b_d, size);
    if (err != cudaSuccess){
        printf("CUDA error(2): %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    printf("C: Allocate GPU Memory3\n");
    err = cudaMalloc((void **) & c_d, size);
    if (err != cudaSuccess){
        printf("CUDA error(3): %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    /* Copy the inputs host -> device. */
    printf("C: Memory Copy1\n");
    err = cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess){
        printf("CUDA error(4): %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    printf("C: Memory Copy2\n");
    err = cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess){
        printf("CUDA error(5): %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    /* Grid sized by ceil-division so any N is fully covered. */
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE);

    printf("C: Launch\n");
    mul_matrix<<<grid, block>>>(a_d, b_d, c_d, N);
    /* FIX: the launch returns no status; surface config errors explicitly. */
    err = cudaGetLastError();
    if (err != cudaSuccess){
        printf("CUDA error(6): %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    /* FIX: the final device -> host copy was unchecked in the original. */
    printf("C: Memory Copy\n");
    err = cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess){
        printf("CUDA error(7): %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    return N;
}

}