serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
22,001
#include "includes.h"
extern "C" { }

// Numerically stable cross-entropy forward pass over a batch of logit rows.
// For each sample (one thread per sample):
//   y[i] = (log(sum_j exp(x[j] - max)) - (x[i] - max)) * t[i]
// i.e. the negative log-softmax of x, masked/weighted by the target t.
//
//   batch_size : number of rows
//   nclasses   : row width
//   x          : [batch_size * nclasses] input logits (read-only)
//   t          : [batch_size * nclasses] targets (read-only)
//   y          : [batch_size * nclasses] output; also used as scratch
//
// Launch: 1-D grid, one thread per sample, tail guarded below.
__global__ void cross_entropy_forward(unsigned int batch_size, unsigned int nclasses, const float* x, const float* t, float* y)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < batch_size) {
        // BUG FIX: compute the row base in 64 bits so batch_size * nclasses
        // may exceed 2^31 without the int index wrapping.
        size_t base = (size_t)tid * nclasses;

        // Max of the row, for the stable log-sum-exp.
        float m = x[base];
        for (int i = 1; i < nclasses; ++i) {
            m = fmaxf(x[base + i], m);
        }

        // Shift by the max (stored in y as scratch).
        for (int i = 0; i < nclasses; ++i) {
            y[base + i] = x[base + i] - m;
        }

        // Sum of exponentials of the shifted row.
        float s = 0.0f;
        for (int i = 0; i < nclasses; ++i) {
            s += expf(y[base + i]);
        }
        float ln_s = logf(s);

        // y = (ln(s) - shifted logit) * target.
        for (int i = 0; i < nclasses; ++i) {
            y[base + i] = (ln_s - y[base + i]) * t[base + i];
        }
    }
}
22,002
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define BLOCK_SIZE 16

// One generation of Conway's Game of Life on an (size+2) x (size+2) grid with
// a one-cell dead halo. Each thread updates one interior cell (1..size).
// Launch: 2-D grid of BLOCK_SIZE x BLOCK_SIZE blocks covering size x size.
__global__ void rules(int size, int *simulation, int *newsimulation)
{
    // Map the thread onto the interior of the padded grid: indices in [1, size].
    int d_row = blockDim.y * blockIdx.y + threadIdx.y + 1;
    int d_col = blockDim.x * blockIdx.x + threadIdx.x + 1;
    int id = d_row * (size + 2) + d_col;

    if (d_row <= size && d_col <= size) {
        // Count the 8 neighbours; the dead halo means no edge special-casing.
        int count = simulation[id + (size + 2)] + simulation[id - (size + 2)]   // below / above
                  + simulation[id + 1]          + simulation[id - 1]            // right / left
                  + simulation[id + (size + 3)] + simulation[id - (size + 3)]   // diagonals
                  + simulation[id - (size + 1)] + simulation[id + (size + 1)];

        int cell = simulation[id];

        // The game rules, spelled out explicitly.
        if (cell == 1 && count < 2)                       newsimulation[id] = 0;    // underpopulation
        else if (cell == 1 && (count == 2 || count == 3)) newsimulation[id] = 1;    // survival
        else if (cell == 1 && count > 3)                  newsimulation[id] = 0;    // overpopulation
        else if (cell == 0 && count == 3)                 newsimulation[id] = 1;    // reproduction
        else                                              newsimulation[id] = cell;
    }
}

// Abort with a message if any preceding CUDA call failed.
void errorCheck()
{
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
}

int main(int argc, char* argv[])
{
    int i, j;
    int *matrix, *d_matrix, *t_matrix, *d_tmpsimulation;
    int size = 4999;
    int maxIter = 1 << 12;
    int N = sizeof(int) * (size + 2) * (size + 2);

    matrix = (int*)malloc(N);
    cudaMalloc(&d_matrix, N);
    cudaMalloc(&t_matrix, N);

    // Random interior, dead halo. BUG FIX: the original tested
    // (i==0 || j==size) and (i==size || j==size), which zeroed two interior
    // lines and left the real border row size+1 / column 0 uninitialised.
    for (i = 0; i <= size + 1; i++) {
        for (j = 0; j <= size + 1; j++) {
            if (i == 0 || j == 0 || i == size + 1 || j == size + 1)
                matrix[i * (size + 2) + j] = 0;
            else
                matrix[i * (size + 2) + j] = rand() % 2;
        }
    }

    cudaMemcpy(d_matrix, matrix, N, cudaMemcpyHostToDevice);
    // The kernel writes only interior cells, so seed the second buffer too;
    // otherwise its halo would be read uninitialised after the first swap.
    cudaMemcpy(t_matrix, matrix, N, cudaMemcpyHostToDevice);

    dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1);
    int linsimulation = (int)ceil(size / (float)BLOCK_SIZE);
    dim3 simulationSize(linsimulation, linsimulation, 1);

    errorCheck();

    // maxIter (=4096) generations, ping-ponging the two device buffers.
    for (i = 0; i < maxIter; i++) {
        rules<<<simulationSize, blockSize>>>(size, d_matrix, t_matrix);
        d_tmpsimulation = d_matrix;
        d_matrix = t_matrix;
        t_matrix = d_tmpsimulation;
    }
    // Extra generations 4096..4999 (5000 total, as in the original).
    for (i = 4096; i < 5000; i++) {
        rules<<<simulationSize, blockSize>>>(size, d_matrix, t_matrix);
        d_tmpsimulation = d_matrix;
        d_matrix = t_matrix;
        t_matrix = d_tmpsimulation;
    }
    errorCheck();

    cudaMemcpy(matrix, d_matrix, N, cudaMemcpyDeviceToHost);

    for (i = 0; i <= size + 1; i++) {
        for (j = 0; j <= size + 1; j++) {
            printf("%d", matrix[i * (size + 2) + j]);
        }
        printf("\n");
    }

    cudaFree(d_matrix);
    cudaFree(t_matrix);
    free(matrix);
    return 0;
}
22,003
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>

#define thread_size 128

const long N = 16 * 16;

// Adds 45 to every element of dev_a, one thread per element.
// Launch: 1-D grid sized by ceil(N / thread_size); tail guarded below.
__global__ void Vector_Addition(long *dev_a)
{
    // Global id of this thread within the whole grid.
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < N)    // boundary check for the rounded-up grid
        dev_a[tid] = dev_a[tid] + 45;
}

int main(void)
{
    // Host array.
    long Host_a[N];
    // Device array.
    long *dev_a;
    // Integer ceiling division instead of the original "+0.99" float trick.
    long block_size = (N + thread_size - 1) / thread_size;

    // Allocate the memory on the GPU.
    cudaMalloc((void **)&dev_a, N * sizeof(long));

    // Fill the host array on the CPU.
    for (long i = 0; i < N; i++) {
        Host_a[i] = i + 2;
    }

    // BUG FIX: Host_a holds long values, so %ld is required (not %d).
    for (int i = 0; i < 100; i++)
        printf(" = %ld\n", Host_a[i]);
    printf("************************************************\n");

    // Copy host array to device array.
    cudaMemcpy(dev_a, Host_a, N * sizeof(long), cudaMemcpyHostToDevice);

    // Launch with thread_size threads per block — the same constant the
    // grid size was derived from (the original hard-coded 128 here).
    Vector_Addition <<< block_size, thread_size >>> (dev_a);

    // Copy back to host array from device array.
    cudaMemcpy(Host_a, dev_a, N * sizeof(long), cudaMemcpyDeviceToHost);

    // Display the result.
    for (int i = 0; i < 100; i++)
        printf(" =%ld \n", Host_a[i]);

    // Free the device array memory.
    cudaFree(dev_a);
    return 0;
}
22,004
/* * Copyright (C) 2009 by Vitsios Dimitrios, Thomaidis Panagiotis * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/

/**
 * Shortest path,
 * parallel implementation
 * using CUDA
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <cuda.h>
#include <time.h>
#include <math.h>
#include <float.h>

#define N 512

// One block per destination node of the current graph level. Each block
// reduces min(C[src] + c[src][dst]) over the N source nodes and records the
// argmin source in `path`.
//   c       : device edge-cost double buffer, 2*K levels of N*N floats
//   C       : running minimum cost per node, updated in place
//   k       : which half of the double buffer to read (0 or 1)
//   iter    : level index inside the current K-level chunk
//   path    : argmin log, indexed [blockIdx.x * L + counter]
// Launch: <<<N, N>>>; requires blockDim.x == N (shared arrays are sized N).
__global__ void shortest_path(float *c, float *C, int k, int iter,
                              int *path, int counter, int L, int K)
{
    __shared__ float C_th[N];
    __shared__ int MIN_IDX[N];
    unsigned int s;

    int index = blockIdx.x * N + threadIdx.x;
    MIN_IDX[threadIdx.x] = threadIdx.x;

    // BUG FIX: each buffer half is K*N*N floats, so the half offset is
    // k*K*N*N. The original used k*k (K was passed but never used); since
    // k is 0/1 that collapsed both halves onto the first K levels.
    int offset = (k * K + iter) * N * N;
    C_th[threadIdx.x] = C[threadIdx.x] + c[index + offset];
    __syncthreads();

    // Tree reduction of the (value, index) minimum. The original unrolled the
    // final warp without any synchronisation, which is a data race under
    // independent thread scheduling (Volta+); a barrier per step is safe.
    for (s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            if (C_th[MIN_IDX[threadIdx.x]] > C_th[MIN_IDX[threadIdx.x + s]]) {
                MIN_IDX[threadIdx.x] = MIN_IDX[threadIdx.x + s];
            }
        }
        __syncthreads();
    }

    if (threadIdx.x == 0) {
        C[blockIdx.x] = C_th[MIN_IDX[0]];
        path[blockIdx.x * L + counter] = MIN_IDX[0];
    }
}

int main(int argc, char* argv[])
{
    FILE *f_path;
    clock_t start, end;
    int i, j, r = 0, k = 1, iter, counter = 0, *path_host, *path, L, K;
    float *c_host, *c, *C_host, *C_host_L, *C;

    printf("Type the number of levels of the graph (L): [ < 2048]\n\n");
    scanf("%d", &L);
    printf("Type the value of 'K': \n\n");
    scanf("%d", &K);

    cudaStream_t stream[2];
    cudaStreamCreate(&stream[0]);
    cudaStreamCreate(&stream[1]);

    C_host    = (float *)malloc(N * sizeof(float));
    C_host_L  = (float *)malloc(N * sizeof(float));
    path_host = (int *)malloc(L * N * sizeof(int));
    // BUG FIX: the original also malloc'd `path` here and then overwrote the
    // pointer with cudaMalloc below, leaking L*N ints of host memory.

    int size_c = L * N * N * sizeof(float);
    cudaMallocHost((void**)&c_host, size_c);   // pinned: needed for async copies

    srand(5);
    // Edge costs in [0.1, 10.09].
    for (i = 0; i < L * N * N; i++)
        c_host[i] = (float)(rand() % 1000 + 10) / 100;
    // Initial per-node costs.
    for (i = 0; i < N; i++)
        C_host[i] = (float)(rand() % 1000 + 10) / 100;
    // Random costs for the last level.
    for (i = 0; i < N; i++)
        C_host_L[i] = (float)(rand() % 1000 + 10) / 100;

    start = clock();

    int size1 = N * N * sizeof(float);
    cudaMalloc((void**)&c, 2 * K * size1);     // double buffer of K levels each
    cudaMemcpy(c, c_host, K * size1, cudaMemcpyHostToDevice);

    int size2 = N * sizeof(float);
    cudaMalloc((void**)&C, size2);
    cudaMemcpy(C, C_host, size2, cudaMemcpyHostToDevice);

    int size3 = N * L * sizeof(int);
    cudaMalloc((void**)&path, size3);          // fully written by the kernels

    printf("GPU computing started!\n");

    // Overlap: upload chunk r into one buffer half on stream[0] while the
    // kernels consume the other half on stream[1].
    for (r = 1; r < (L / K); r++) {
        cudaMemcpyAsync(c + k * K * N * N, c_host + r * K * N * N, K * size1,
                        cudaMemcpyHostToDevice, stream[0]);
        for (iter = 0; iter < K; iter++) {
            shortest_path<<< N, N, 0, stream[1] >>>(c, C, !k, iter, path, counter, L, K);
            counter++;
        }
        cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
        k = !k;                    // swap buffer halves
    }
    // Last chunk (uploaded during the final loop iteration above).
    for (iter = 0; iter < K; iter++) {
        shortest_path<<< N, N, 0, stream[0] >>>(c, C, !k, iter, path, counter, L, K);
        counter++;
    }

    cudaMemcpy(C_host, C, size2, cudaMemcpyDeviceToHost);
    cudaMemcpy(path_host, path, size3, cudaMemcpyDeviceToHost);
    end = clock();

    // Combine with the last level's costs and pick the global minimum.
    float total_min = C_host[0] + C_host_L[0];
    int total_min_idx = 0;
    for (i = 1; i < N; i++) {
        if (C_host[i] + C_host_L[i] < total_min) {
            total_min = C_host[i] + C_host_L[i];
            total_min_idx = i;
        }
    }

    printf("\nTotal min = %f", total_min);
    printf("\nTotal min INDEX = %d", total_min_idx);

    f_path = fopen("path.txt", "w");
    printf("\n\n*** path_host ***\n");
    for (j = 0; j < L; j++) {
        printf("%d ", path_host[total_min_idx * L + j]);
        fprintf(f_path, "%d", path_host[total_min_idx * L + j]);
    }
    fclose(f_path);

    printf("\n\n*******************************************************************************");
    // BUG FIX: the clock arithmetic is clock_t (long on common ABIs); the
    // original printed it with %d.
    printf("\nTotal time elapsed for transfering the data and computing in GPU: %ld ms",
           (long)((end - start) * 1000 / CLOCKS_PER_SEC));

    cudaFree(c);
    cudaFree(C);
    cudaFree(path);
    cudaFreeHost(c_host);
    free(C_host);
    free(C_host_L);
    free(path_host);
    cudaStreamDestroy(stream[0]);
    cudaStreamDestroy(stream[1]);

    scanf("%d", &i);   // keep the console window open, as in the original
    return EXIT_SUCCESS;
}
22,005
#include <iostream>
#include "matrix.cuh"

// Element-wise in-place addition: m1 += m2. Launched as <<<cols, rows>>>, so
// threadIdx.x selects the row and blockIdx.x the column of the flat index.
__global__ void add(Matrix * m1, Matrix * m2){
    int index = threadIdx.x * m1->getCols() + blockIdx.x;
    m1->add(index, m2->getVal(index));
}

// Block until the device is idle, then abort on any pending CUDA error.
void sync(){
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess){
        fprintf(stderr,"Error: %s\n",cudaGetErrorString(error));
        exit(1);
    }
}

Matrix * moveMatrixToDevice(Matrix * h_m);

int main(){
    Matrix * h_m1 = new Matrix(8,8,1), * h_m2 = new Matrix(8,8,1);
    std::cout << "M1\n";
    h_m1->display();
    std::cout << "M2\n";
    h_m2->display();

    // Push the matrices' payloads to the device before copying the objects.
    h_m1->matToDevice();
    h_m2->matToDevice();
    Matrix * d_m1 = moveMatrixToDevice(h_m1), * d_m2 = moveMatrixToDevice(h_m2);

    std::cout << "Addition\n";
    add<<<8,8>>>(d_m1, d_m2);
    sync();

    std::cout << "After Addition\n";
    std::cout << "M1\n";
    h_m1->matToHost();
    h_m1->display();
    std::cout << "M2\n";
    h_m2->matToHost();   // BUG FIX: the original called h_m1->matToHost() twice
    h_m2->display();
}

// Shallow-copies a Matrix object into device memory and returns the device
// pointer. NOTE(review): only the object's bytes are copied; any pointers
// inside must already refer to device memory (presumably arranged by
// matToDevice()) — confirm against matrix.cuh.
Matrix * moveMatrixToDevice(Matrix * h_m){
    Matrix * d_m;
    // Allocate space for the object itself.
    cudaMalloc((void **) &d_m, sizeof(Matrix));
    // Copy the object to the device.
    cudaMemcpy(d_m, h_m, sizeof(Matrix), cudaMemcpyHostToDevice);
    return d_m;
}
22,006
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

// Tile edge: each block computes a TILE_SIZE x TILE_SIZE patch of the result.
// The number of blocks is the matrix size divided by the tile size.
#define TILE_SIZE 2

// Edge length of the square matrices. Must be a multiple of TILE_SIZE.
const int size = 4;

// Naive square matrix multiply C = A*B: one thread per output element,
// reading A and B straight from global memory (row-major layout:
// element (row, col) lives at row * size + col).
__global__ void gpu_matrix_multiplcation_nonshared(float *device_a, float *device_b, float *device_c, const int size)
{
    unsigned int row, col;
    col = TILE_SIZE * blockIdx.x + threadIdx.x;
    row = TILE_SIZE * blockIdx.y + threadIdx.y;

    // BUG FIX: accumulate in a register initialised to zero. The original
    // did "device_c[...] += ...", summing into device memory that was never
    // cleared before the launch.
    float sum = 0.0f;
    for (unsigned int k = 0; k < size; ++k) {
        // c[i,j] = sum_k a[i,k] * b[k,j]
        sum += device_a[row * size + k] * device_b[k * size + col];
    }
    device_c[row * size + col] = sum;
}

// Tiled square matrix multiply using shared memory: each TILE_SIZE^2 tile of
// A and B is staged once per step and reused TILE_SIZE times, cutting global
// memory traffic. Requires blockDim == (TILE_SIZE, TILE_SIZE).
__global__ void gpu_matrix_multiplcation_shared(float *device_a, float *device_b, float *device_c, const int size)
{
    __shared__ float shared_a[TILE_SIZE][TILE_SIZE];
    __shared__ float shared_b[TILE_SIZE][TILE_SIZE];

    unsigned int col = TILE_SIZE * blockIdx.x + threadIdx.x;
    unsigned int row = TILE_SIZE * blockIdx.y + threadIdx.y;

    // BUG FIX: the original (a) loaded both tiles with the same row-based
    // index, (b) put the load loop, barrier and product loop in the wrong
    // order, and (c) accumulated into uninitialised device_c with swapped
    // threadIdx components. This is the standard tiling: walk the K
    // dimension in TILE_SIZE steps, staging one tile of A and one of B.
    float sum = 0.0f;
    for (unsigned int m = 0; m < size / TILE_SIZE; ++m) {
        shared_a[threadIdx.y][threadIdx.x] = device_a[row * size + (m * TILE_SIZE + threadIdx.x)];
        shared_b[threadIdx.y][threadIdx.x] = device_b[(m * TILE_SIZE + threadIdx.y) * size + col];
        __syncthreads();   // both tiles fully written before anyone reads

        for (unsigned int j = 0; j < TILE_SIZE; ++j) {
            sum += shared_a[threadIdx.y][j] * shared_b[j][threadIdx.x];
        }
        __syncthreads();   // done reading before the next step overwrites
    }
    device_c[row * size + col] = sum;
}

int main(int argc, char *argv[])
{
    // Host and device arrays.
    float host_a[size][size], host_b[size][size], host_result[size][size];
    float *device_a, *device_b, *device_result;

    // Fill the host matrices: a[i][j] = i, b[i][j] = j.
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            host_a[i][j] = i;
            host_b[i][j] = j;
        }
    }

    // BUG FIX: buffer sizes were computed with sizeof(int) for float data;
    // same width on common ABIs, but wrong in intent.
    size_t bytes = size * size * sizeof(float);
    cudaMalloc((void**)&device_a, bytes);
    cudaMalloc((void**)&device_b, bytes);
    cudaMalloc((void**)&device_result, bytes);

    // Copy data from host to device.
    cudaMemcpy(device_a, host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, host_b, bytes, cudaMemcpyHostToDevice);

    // Grid/block shapes: one block per output tile.
    dim3 dimGrid(size / TILE_SIZE, size / TILE_SIZE, 1);
    dim3 dimBlock(TILE_SIZE, TILE_SIZE, 1);

    // Kernel call with non-shared memory (kept for comparison):
    // gpu_matrix_multiplcation_nonshared <<< dimGrid, dimBlock >>> (device_a, device_b, device_result, size);

    // Kernel call with shared memory.
    gpu_matrix_multiplcation_shared <<< dimGrid, dimBlock >>> (device_a, device_b, device_result, size);

    // Copy the result back to the host.
    cudaMemcpy(host_result, device_result, bytes, cudaMemcpyDeviceToHost);

    // Print the result to the console.
    printf("The result of Matrix multiplication is: \n");
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            printf("%f \t", host_result[i][j]);
        }
        printf("\n");
    }

    // Free device memory.
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_result);
    return 0;
}
22,007
#include <vector>
#include <iostream>
#include <fstream>

// Shifts arr left by one starting at `index`, dropping arr[index].
// temp_length is the current logical length of the array.
__device__ void remove_index_from_array(char* arr, int index, int temp_length)
{
    for (int i = 0; i < temp_length - 1; i++) {
        if (i >= index) {
            arr[i] = arr[i + 1];
        }
    }
}

// Same removal, but inside a per-thread slot of a shared-memory array:
// thread `threadId` owns arr[threadId*word_length .. +word_length-1].
// `index` is an absolute index into the shared array.
__device__ void remove_index_from_shared_memory_array(char* arr, int index, int temp_length,
                                                      int threadId, int word_length)
{
    for (int i = word_length * threadId; i < word_length * threadId + temp_length - 1; i++) {
        if (i >= index) {
            arr[i] = arr[i + 1];
        }
    }
}

// Generates all num_perm permutations of `word` by decoding each permutation
// index in the factorial number system; each warp owns a contiguous range of
// indices, strided by lane. Per-thread scratch copies of the word live in
// dynamic shared memory: launch with blockSize * word_length bytes.
__global__ void find_all_permutations_kernel_shared_memory(char* word, int word_length,
                                                           unsigned long long num_perm,
                                                           char* permutations)
{
    extern __shared__ char temp_word[];

    unsigned long long thread_id = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned long long thread_num = blockDim.x * gridDim.x;
    unsigned long long warp_id = thread_id / 32;
    unsigned long long warp_num = thread_num % 32 == 0 ? thread_num / 32 : thread_num / 32 + 1;
    unsigned long long load = num_perm % warp_num == 0 ? num_perm / warp_num : num_perm / warp_num + 1;
    unsigned long long beg = load * warp_id;
    unsigned long long end = min(num_perm, beg + load);
    unsigned long long lane = thread_id % 32;
    beg += lane;

    // BUG FIX: the original addressed shared memory with the *global*
    // permutation index `beg`, running far past the blockSize*word_length
    // bytes allocated. Each thread must use its own block-local slot.
    int slot = threadIdx.x * word_length;

    for (unsigned long long i = beg; i < end; i += 32) {
        unsigned long long divisor = num_perm;
        int temp_length = word_length;

        // Refresh this thread's scratch copy of the word.
        for (int j = 0; j < word_length; j++) {
            temp_word[slot + j] = word[j];
        }

        // Decode index i digit by digit, picking and removing one remaining
        // character per factorial digit.
        unsigned long long permutations_index = 0;
        for (int digit = word_length; digit > 0; digit--) {
            divisor /= digit;
            unsigned long long t = i / divisor;
            int index = t % digit;
            permutations[i * word_length + permutations_index] = temp_word[slot + index];
            permutations_index++;
            remove_index_from_shared_memory_array(temp_word, slot + index, temp_length,
                                                  threadIdx.x, word_length);
            temp_length--;
        }
    }
}

// Same algorithm with the scratch word in per-thread local memory.
// Assumes word_length <= 12 (capacity of the local buffer).
__global__ void find_all_permutations_kernel(char* word, int word_length,
                                             unsigned long long num_perm,
                                             char* permutations)
{
    unsigned long long thread_id = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned long long thread_num = blockDim.x * gridDim.x;
    unsigned long long warp_id = thread_id / 32;
    unsigned long long warp_num = thread_num % 32 == 0 ? thread_num / 32 : thread_num / 32 + 1;
    unsigned long long load = num_perm % warp_num == 0 ? num_perm / warp_num : num_perm / warp_num + 1;
    unsigned long long beg = load * warp_id;
    unsigned long long end = min(num_perm, beg + load);
    unsigned long long lane = thread_id % 32;
    beg += lane;

    for (unsigned long long i = beg; i < end; i += 32) {
        unsigned long long divisor = num_perm;
        int temp_length = word_length;
        char temp[12];
        for (int j = 0; j < word_length; j++) {
            temp[j] = word[j];
        }

        unsigned long long permutations_index = 0;
        for (int digit = word_length; digit > 0; digit--) {
            divisor /= digit;
            unsigned long long t = i / divisor;
            int index = t % digit;
            permutations[i * word_length + permutations_index] = temp[index];
            permutations_index++;
            remove_index_from_array(temp, index, temp_length);
            temp_length--;
        }
    }
}

void generateWord(char* word, int* word_length);

// Generates a random word, times both kernels producing all its permutations,
// and returns the malloc'd buffer of word_length * word_length! characters
// (caller owns it).
char* find_all_permutations(int blockSize, int blockNum, int word_length)
{
    float elapsed = 0;
    cudaEvent_t start, stop;

    // BUG FIX: allocate room for the default length (10) that generateWord
    // may substitute, plus a NUL terminator — the original printed an
    // unterminated, possibly undersized buffer with %s.
    int alloc_len = (word_length > 0) ? word_length : 10;
    char* word = (char *) malloc((alloc_len + 1) * sizeof(char));

    // generate word given length (may reset word_length to 10)
    generateWord(word, &word_length);
    word[word_length] = '\0';
    printf("Word = %s\n", word);
    printf("word length %d\n", word_length);

    // BUG FIX: compute word_length! *after* generateWord may have adjusted
    // word_length; the original used the stale value.
    unsigned long long num_perm = 1;
    for (int k = 1; k <= word_length; num_perm *= k++);

    // Will receive every permutation of the word, back to back.
    char* permutations = (char *) malloc(word_length * num_perm * sizeof(char));
    char* cuda_permutations;
    char* cuda_word;

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc((void **) &cuda_permutations, word_length * num_perm * sizeof(char));
    cudaMalloc((void **) &cuda_word, word_length * sizeof(char));
    // (The original also uploaded the uninitialised output buffer; that copy
    // was pointless and has been dropped.)
    cudaMemcpy(cuda_word, word, word_length * sizeof(char), cudaMemcpyHostToDevice);

    cudaEventRecord(start, 0);
    find_all_permutations_kernel<<<blockNum, blockSize>>>(cuda_word, word_length,
                                                          num_perm, cuda_permutations);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    printf("Elapsed time [No shared memory]: %.5f ms\n", elapsed);

    cudaEventRecord(start, 0);
    find_all_permutations_kernel_shared_memory<<<blockNum, blockSize,
        blockSize * word_length * sizeof(char)>>>(cuda_word, word_length,
                                                  num_perm, cuda_permutations);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    printf("Elapsed time [Shared memory]: %.5f ms\n", elapsed);

    cudaMemcpy(permutations, cuda_permutations,
               word_length * num_perm * sizeof(char), cudaMemcpyDeviceToHost);

    // Deallocate everything except the returned buffer.
    cudaFree(cuda_permutations);
    cudaFree(cuda_word);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(word);
    return permutations;
}

/* Fills `word` with *word_length random capital letters (no NUL terminator is
 * written). A non-positive length is reset to 10 — the caller's buffer must
 * already accommodate that default. rand() is never seeded here, so the word
 * is deterministic across runs. */
void generateWord(char* word, int* word_length)
{
    int rand_num;
    const char capital_letters[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";

    if (*word_length <= 0) {
        printf("Invalid size. Defaulting to size 10.\n");
        *word_length = 10;
    }
    for (int i = 0; i < *word_length; i++) {
        rand_num = rand() % (sizeof(capital_letters) - 1);
        word[i] = capital_letters[rand_num];
    }
}
22,008
#include <stdlib.h>
#include <stdio.h>

// Writes each element's own global index into `array`.
//   n : number of valid elements; the grid may be rounded up past n, so the
//       tail is guarded (the original had no guard and no length parameter).
__global__ void kernel(int *array, int n){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < n)
        array[index] = index;
}

int main(){
    int num_elements = 256;
    int num_bytes = num_elements * sizeof(int);

    int *device_array = 0;
    int *host_array = 0;

    host_array = (int *)malloc(num_bytes);
    cudaMalloc((void **) &device_array, num_bytes);

    int block_size = 128;
    // BUG FIX: ceiling division. The original truncated num_elements /
    // block_size, which silently drops the tail whenever num_elements is not
    // a multiple of block_size.
    int grid_size = (num_elements + block_size - 1) / block_size;

    kernel<<<grid_size, block_size>>>(device_array, num_elements);

    cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);

    for(int i = 0; i < num_elements; ++i)
        printf("%d\n", host_array[i]);

    free(host_array);
    cudaFree(device_array);
    return 0;
}
22,009
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

using namespace std;

// Reads image dimensions and pixels, then the convolution kernel size,
// normalisation coefficient and weights from a text file. The pixel and
// kernel buffers are malloc'd here; the caller owns (and must free) them.
// On failure *imageData is left NULL.
void load(const char* path, float** imageData, int* imgRows, int* imgCols,
          float** convKernelData, int* convKernelSize, float* convKernelCoeff)
{
    FILE* file;
    file = fopen(path, "r");
    if (file == NULL) {
        printf("Cannot open file.\n");
        return;
    }
    fscanf(file, "%d %d ", imgRows, imgCols);
    *imageData = (float*)malloc(*imgRows * *imgCols * sizeof(float));
    for (int i = 0; i < *imgRows * *imgCols; i++)
        fscanf(file, "%f ", &(*imageData)[i]);

    fscanf(file, "%d %f ", convKernelSize, convKernelCoeff);
    *convKernelData = (float*)malloc(*convKernelSize * *convKernelSize * sizeof(float));
    for (int i = 0; i < *convKernelSize * *convKernelSize; i++)
        fscanf(file, "%f ", &(*convKernelData)[i]);
    fclose(file);
}

// Applies an unpadded convolution: one thread per pixel (flat index).
// Pixels closer than convKernelSize/2 to any edge are left untouched (the
// result buffer is pre-seeded with the source image on the host).
__global__ void applyConvolution_GPU(float* resultImageData, const float* sourceImageData,
                                     const int imageRowsSize, const int imageColsSize,
                                     const float* convKernelData, const int convKernelSize,
                                     const float convKernelCoeff)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the rounded-up grid tail.
    if (index >= imageRowsSize * imageColsSize) {
        return;
    }

    int row = index / imageColsSize;
    int col = index % imageColsSize;
    int half = convKernelSize / 2;

    // BUG FIX: the original used '>' where '>=' is required — a pixel at
    // row == imageRowsSize - half would read sourceImageData one full row
    // past the end of the buffer.
    if (row < half || col < half ||
        row >= imageRowsSize - half || col >= imageColsSize - half) {
        return;
    }

    // Weighted sum over the kernel window centred on (row, col).
    float roiSum = 0;
    for (int roiRow = 0; roiRow < convKernelSize; roiRow++) {
        for (int roiCol = 0; roiCol < convKernelSize; roiCol++) {
            int imageRow = row - half + roiRow;
            int imageCol = col - half + roiCol;
            roiSum += sourceImageData[imageRow * imageColsSize + imageCol]
                    * convKernelData[roiRow * convKernelSize + roiCol];
        }
    }
    resultImageData[row * imageColsSize + col] = roiSum * convKernelCoeff;
}

int main()
{
    float* imageData = NULL;
    float* convKernelData = NULL;
    int imgRows, imgCols, convKernelSize;
    float convKernelCoeff;

    load("srcImgData1.txt", &imageData, &imgRows, &imgCols, &convKernelData,
         &convKernelSize, &convKernelCoeff);
    // BUG FIX: bail out if the input file could not be read; the original
    // carried on with uninitialised dimensions and NULL buffers.
    if (imageData == NULL || convKernelData == NULL) {
        fprintf(stderr, "Failed to load input data.\n");
        return 1;
    }

    unsigned int arraySize = imgRows * imgCols;
    unsigned int numOfThreadsInBlock = 512;
    unsigned int numOfBlocks = (arraySize + numOfThreadsInBlock - 1) / numOfThreadsInBlock;

    float *hostSourceImageData, *hostConvKernelData, *hostResultImageData;
    float *devSourceImageData, *devConvKernelData, *devResultImageData;

    // Choose which GPU to run on; change this on a multi-GPU system.
    cudaError_t cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        return 1;
    }

    // Allocate memory on the GPU.
    cudaMalloc((void**)&devSourceImageData, arraySize * sizeof(float));
    cudaMalloc((void**)&devConvKernelData, convKernelSize * convKernelSize * sizeof(float));
    cudaMalloc((void**)&devResultImageData, arraySize * sizeof(float));

    // Allocate pinned memory on the CPU (faster transfers than plain malloc).
    cudaHostAlloc((void**)&hostSourceImageData, arraySize * sizeof(float), cudaHostAllocDefault);
    cudaHostAlloc((void**)&hostConvKernelData, convKernelSize * convKernelSize * sizeof(float), cudaHostAllocDefault);
    cudaHostAlloc((void**)&hostResultImageData, arraySize * sizeof(float), cudaHostAllocDefault);

    // Seed the result with the source so untouched border pixels keep their
    // original values.
    for (int i = 0; i < arraySize; i++) {
        hostSourceImageData[i] = imageData[i];
        hostResultImageData[i] = imageData[i];
    }
    for (int i = 0; i < convKernelSize * convKernelSize; i++)
        hostConvKernelData[i] = convKernelData[i];

    // Copy data CPU -> GPU.
    cudaMemcpy(devSourceImageData, hostSourceImageData, arraySize * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(devConvKernelData, hostConvKernelData, convKernelSize * convKernelSize * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(devResultImageData, hostResultImageData, arraySize * sizeof(float), cudaMemcpyHostToDevice);

    // Launch the kernel on the GPU.
    applyConvolution_GPU<<<numOfBlocks, numOfThreadsInBlock>>>(devResultImageData,
        devSourceImageData, imgRows, imgCols, devConvKernelData, convKernelSize, 1 / 256.0);

    cudaDeviceSynchronize();   // wait for kernel to finish

    // Copy data GPU -> CPU.
    cudaMemcpy(hostResultImageData, devResultImageData, arraySize * sizeof(float), cudaMemcpyDeviceToHost);

    // Free pinned host memory.
    cudaFreeHost(hostSourceImageData);
    cudaFreeHost(hostConvKernelData);
    cudaFreeHost(hostResultImageData);
    // Free device memory.
    cudaFree(devSourceImageData);
    cudaFree(devConvKernelData);
    cudaFree(devResultImageData);
    // BUG FIX: free the buffers malloc'd by load() (leaked in the original).
    free(imageData);
    free(convKernelData);
    return 0;
}
22,010
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <iostream>
using namespace std;

// Threads per block used by all launches in this experiment file.
#define CAFFE_CUDA_NUM_THREADS 196

// Ceil-divide: number of blocks needed to cover N work items.
inline int CAFFE_GET_BLOCKS(const int N) {
  return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}

// Reference depthwise convolution: one output element per work item, with a
// grid-stride loop over all nthreads outputs. Handles padding by clamping the
// input window and offsetting into the kernel (khstart/kwstart).
template <typename Dtype>
__global__ void ConvForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int conved_height, const int conved_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, const Dtype* const weight, const Dtype* const bias, const bool bias_term_) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) {
    // Decompose the flat output index into (n, c, ph, pw).
    const int pw = index % conved_width;                      // output column
    const int ph = (index / conved_width) % conved_height;    // output row
    const int c = (index / conved_width / conved_height) % channels;
    const int n = index / conved_width / conved_height / channels;
    // Top-left corner of the input window (may be negative before clamping).
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + kernel_h, height + pad_h);
    int wend = min(wstart + kernel_w, width + pad_w);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    Dtype aveval = 0;
    // Per-channel slices: depthwise, so each channel has its own kernel_h*kernel_w filter.
    const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width;
    const Dtype* const weight_slice = weight + c * kernel_h * kernel_w;
    // Offset into the filter when the window was clipped at the top/left edge.
    int khstart = hend < kernel_h ? kernel_h - hend : 0;
    int kwstart = wend < kernel_w ? kernel_w - wend : 0;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        // (h - hstart) walks 0..kernel_h over the clipped window.
        aveval += bottom_slice[h * width + w] * weight_slice[(khstart + h - hstart) * kernel_w + (kwstart + w - wstart)];
      }
    }
    if (bias_term_) {
      aveval += bias[c];
    }
    top_data[index] = aveval;
  }
}

// Experimental variant: caches the filter in shared memory; each thread computes
// a 2x2 patch of outputs. NOTE(review): s_bottom is declared but never used, and
// the (index*2)/(index*4) decomposition of pw/ph/c looks inconsistent — verify
// against ConvForward before trusting results (the launch in main() is commented out).
template <typename Dtype>
__global__ void ConvForwardShared(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int conved_height, const int conved_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, const Dtype* const weight, const Dtype* const bias, const bool bias_term_) {
  __shared__ float s_bottom[CAFFE_CUDA_NUM_THREADS], s_weight[CAFFE_CUDA_NUM_THREADS];
  /*for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads * 4; index += blockDim.x * gridDim.x){
      s_bottom[index%CAFFE_CUDA_NUM_THREADS]= bottom_data[index];
      s_weight[index%CAFFE_CUDA_NUM_THREADS]= weight[index%CAFFE_CUDA_NUM_THREADS + blockIdx.x * kernel_h * kernel_w * 1];
  }*/
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // NOTE(review): x % m < m is always true for positive m, so every thread
  // stores into s_weight — presumably meant to restrict to the first
  // kernel_h*kernel_w threads; verify. Assumes one channel's filter per block.
  if ( index % (kernel_h * kernel_w) < kernel_h * kernel_w){
    s_weight[index % (kernel_h * kernel_w)] = weight[index % (kernel_h * kernel_w) + blockIdx.x * kernel_h * kernel_w * 1];
  }
  /*#pragma unroll
  for (int i=index % blockDim.x ; i < blockDim.x * 4 ; i += blockDim.x){
      s_bottom[i]= bottom_data[blockIdx.x * blockDim.x * 4 +i];
  }*/
  __syncthreads();
  //for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads ; index += blockDim.x * gridDim.x){
  if (index < nthreads){
    const int pw = (index * 2) % conved_width;                    // left column of the 2x2 patch
    const int ph = (index * 2 / conved_width) * 2 % conved_height; // top row of the 2x2 patch
    const int c = (index * 4 / conved_width / conved_height) % channels;
    const int n = index / conved_width / conved_height / channels; // = 0 in this test (single image)
    //int hend = min(hstart + kernel_h, height);
    //int wend = min(wstart + kernel_w, width);
    const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width;
    /*const Dtype* const weight_slice = weight + c * kernel_h * kernel_w;*/
    for (int j = 0; j < 2; j++)
      for (int i = 0; i < 2; i++) {
        Dtype aveval = 0;
        //int hstart = (ph + j )* stride_h - pad_h;
        //int wstart = (pw + i) * stride_w - pad_w;
        // Clamp the input window to the image, same as ConvForward but inline.
        const int hstart = (ph + j )* stride_h - pad_h >0? (ph + j )* stride_h - pad_h :0;
        const int wstart = (pw + i) * stride_w - pad_w >0? (pw + i) * stride_w - pad_w :0;
        const int hend = (ph + j )* stride_h - pad_h + kernel_h< height? (ph + j )* stride_h - pad_h + kernel_h : height;
        const int wend = (pw + i) * stride_w - pad_w + kernel_w< width? (pw + i) * stride_w - pad_w + kernel_w : width;
        const int khstart = hend<kernel_h?kernel_h-hend:0;
        const int kwstart = wend<kernel_w?kernel_w-wend:0;
        #pragma unroll
        for (int h = hstart; h < hend; ++h) {
          #pragma unroll
          for (int w = wstart; w < wend; ++w) {
            aveval += bottom_slice[h * width + w ] * s_weight[(khstart+ h -hstart) * kernel_w + (kwstart + w -wstart)];
          }
        }
        if(bias_term_) aveval += bias[c];
        top_data[(c * conved_height + ph + j) * conved_width + pw + i] = aveval;
      }
  }
}

// Copies one channel of bottom_in into a zero-padded buffer bottom_out.
// Launch: one block per channel, one thread per padded-plane element
// (blockDim.x == (width+2*pad_w)*(height+2*pad_h)).
// NOTE(review): the boundary test uses width+pad_w*3 for the first interior
// element, which matches only pad_h == pad_w == 1 — confirm for other paddings.
template <typename Dtype>
__global__ void Padding(const int nthreads, const Dtype* const bottom_in, Dtype* bottom_out, const int height, const int width, int pad_h, const int pad_w) {
  //extern __shared__ Dtype s_bottom[];
  //int index = blockIdx.x * blockDim.x + threadIdx.x;
  int index = threadIdx.x; // position inside this channel's padded plane
  if (index>=width+pad_w*3 && index < (width+pad_w*2)*(height+pad_h*2)-width-pad_w*3 && index %(width+pad_w*2) != 0 && index %(width+pad_w*2) != width+pad_w*2-1){
    // Interior element: map padded coordinates back to the unpadded source.
    bottom_out[index+blockIdx.x * blockDim.x ] = bottom_in[(index/(width+pad_w*2)-pad_h)*width + index%(width+pad_w*2)-pad_w + blockIdx.x * height*width];
    //printf("bid=%d, bdim=%d", blockIdx.x, blockDim.x);
  }
  else bottom_out[index+blockIdx.x * blockDim.x ] = 0; // border -> zero pad
}

// Like ConvForwardShared but expects an already zero-padded input (from
// Padding), so no window clamping is needed. Same 2x2-patch-per-thread scheme
// and the same questionable index decomposition — see NOTE on ConvForwardShared.
template <typename Dtype>
__global__ void ConvForwardPadded(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int conved_height, const int conved_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, const Dtype* const weight, const Dtype* const bias, const bool bias_term_) {
  __shared__ float s_bottom[CAFFE_CUDA_NUM_THREADS], s_weight[CAFFE_CUDA_NUM_THREADS]; // s_bottom unused
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // NOTE(review): condition is always true — see ConvForwardShared.
  if ( index % (kernel_h * kernel_w) < kernel_h * kernel_w){
    s_weight[index % (kernel_h * kernel_w)] = weight[index % (kernel_h * kernel_w) + blockIdx.x * kernel_h * kernel_w * 1];
  }
  /*#pragma unroll
  for (int i=index % blockDim.x ; i < blockDim.x * 4 ; i += blockDim.x){
      s_bottom[i]= bottom_data[blockIdx.x * blockDim.x * 4 +i];
  }*/
  __syncthreads();
  if (index < nthreads){
    const int pw = (index * 2) % conved_width;
    const int ph = (index * 2 / conved_width) * 2 % conved_height;
    const int c = (index * 4 / conved_width / conved_height) % channels;
    const int n = index / conved_width / conved_height / channels; // = 0 in this test
    // Slice into the PADDED input plane for channel c.
    const Dtype* const bottom_slice = bottom_data + (n * channels + c) * (height+pad_h*2) * (width+pad_w*2);
    for (int j = 0; j < 2; j++)
      for (int i = 0; i < 2; i++) {
        Dtype aveval = 0;
        // No clamping: padding guarantees the full kernel window is in bounds.
        const int hstart = (ph + j )* stride_h ;
        const int wstart = (pw + i) * stride_w ;
        //const int hend = hstart + kernel_h;
        //const int wend = (pw + i) * stride_w - pad_w + kernel_w;
        const int khstart = 0;
        const int kwstart = 0;
        #pragma unroll
        for (int h = hstart; h < hstart + kernel_h; ++h) {
          #pragma unroll
          for (int w = wstart; w < wstart + kernel_w; ++w) {
            aveval += bottom_slice[h * (width+pad_w*2) + w ] * s_weight[(h -hstart) * kernel_w + (w -wstart)];
          }
        }
        if(bias_term_) aveval += bias[c];
        top_data[(c * conved_height + ph + j) * conved_width + pw + i] = aveval;
      }
  }
}

// Driver: builds a random 512x14x14 depthwise-conv problem (3x3 kernel, pad 1,
// stride 1), runs the reference kernel into d_top1 and the padded variant into
// d_top2, dumps part of the padded buffer, then prints the first 20 mismatches.
int main(int argc, char* argv[]) {
  // filter 3x3x512 depthwise; input 14x14x512
  //float* weight = new float[1024];
  //float* bottom = new float[1024];
  /*printf("here\n");
  float *a,*b,*c;
  cudaMallocManaged(&a, 256*sizeof(float));
  cudaMallocManaged(&b, 256*sizeof(float));
  cudaMallocManaged(&c, 256*sizeof(float));
  float *out = new float[256];
  for(int i=0;i<256;i++) {a[i]=1;b[i]=2;}
  test<<<16, 256>>>(a,b,c);
  cudaMemcpy(out, c, 256*sizeof(float), cudaMemcpyDeviceToHost);
  printf("c[3]=%f",out[3]); */
  const int channels = 512;
  const int height = 14;
  const int width = 14;
  const int kernel_h = 3;
  const int kernel_w = 3;
  const int stride_h = 1;
  const int stride_w = 1;
  const int pad_h = 1;
  const int pad_w = 1;
  const int conved_height = height;  // stride 1, pad 1 -> same spatial size
  const int conved_weight = width;
  const bool bias_term = false;
  const int n = channels * height * width; // total elements per tensor
  const int count = n; // unused
  float *d_weight, *d_bottom, *d_bottom_padded, *d_top1, *d_top2;
  // Managed memory so host init / device kernels share the same pointers.
  cudaMallocManaged(&d_weight, n*sizeof(float));
  cudaMallocManaged(&d_bottom, n*sizeof(float));
  cudaMallocManaged(&d_top1, n*sizeof(float));
  cudaMallocManaged(&d_top2, n*sizeof(float));
  for(int i=0;i<n;i++) d_weight[i]=((double) rand() / (RAND_MAX));
  for(int i=0;i<n;i++) d_bottom[i]=((double) rand() / (RAND_MAX));
  int pcount = (height+pad_h*2)*(width+pad_w*2)*channels; // padded element count
  printf("numblocks=%d", CAFFE_GET_BLOCKS(n));
  // Reference result -> d_top1.
  ConvForward<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, d_bottom, n, channels, height, width, conved_height, conved_weight, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, d_top1, d_weight, 0, bias_term);
  int nb = CAFFE_GET_BLOCKS(n);
  int bs = CAFFE_CUDA_NUM_THREADS/4; // each thread covers a 2x2 output patch
  int nt = n/4;
  /*ConvForwardShared<float><<<nb, bs>>>( nt, d_bottom, n, channels, height, width,conved_height,conved_weight,kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, d_top2,d_weight,0,bias_term);*/
  int numPadThreads = (height+pad_h*2)*(width+pad_w*2); // one thread per padded element, one block per channel
  cudaMallocManaged(&d_bottom_padded, pcount*sizeof(float));
  Padding<float><<<(pcount + numPadThreads - 1) / numPadThreads, numPadThreads>>>(pcount, d_bottom, d_bottom_padded, height, width, pad_h, pad_w );
  float *bottom_padded = new float[pcount];
  cudaMemcpy(bottom_padded, d_bottom_padded, pcount*sizeof(float), cudaMemcpyDeviceToHost);
  // Debug dump: the first padded plane plus 10 extra rows (spills into the
  // next channel — presumably intentional for inspection; verify).
  for(int j=0;j< (height+pad_h*2)+10; j++){
    for(int i=0; i< (width+pad_w*2); i++) printf("%.1f ", bottom_padded[i+j*(width+pad_w*2)]);
    printf("\n");
  }
  // Padded-input variant -> d_top2.
  ConvForwardPadded<float><<<nb, bs>>>( nt, d_bottom_padded, n, channels, height, width, conved_height, conved_weight, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, d_top2, d_weight, 0, bias_term);
  float *out1 = new float[n];
  float *out2 = new float[n];
  cudaMemcpy(out1, d_top1, n*sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(out2, d_top2, n*sizeof(float), cudaMemcpyDeviceToHost);
  // Report up to 20 exact-mismatch positions between the two implementations.
  int c = 0;
  for(int i=0;i<n;i++) if(out1[i]!=out2[i]&&c<20) {printf("top1[%d]=%f, top2[%d]=%f", i, out1[i], i, out2[i]); c++;}
  return 0;
}
22,011
// filename: vmult!.cu
// Element-wise scaled vector product: c[i] = alpha * a[i] * b[i].
// One thread per element; launch with enough blocks to cover lengthA.
extern "C" // keep the symbol unmangled so it can be looked up by name
{
    __global__ void vmult(const int lengthA, const double alpha, const double *a, const double *b, double *c)
    {
        const int idx = blockIdx.x * blockDim.x + threadIdx.x;
        if (idx >= lengthA)
        {
            return; // tail guard: surplus threads in the last block do nothing
        }
        c[idx] = alpha * a[idx] * b[idx];
    }
}
22,012
#include "matrix.cuh"

// Device-side read of element x from vector v's backing storage.
// Presumably v is a 1-row vector (the disabled asserts below suggest so) —
// TODO confirm; no bounds checking is performed on x.
__device__ float device_vector_get(matrix_t* v, unsigned int x) {
    //assert(v->rows == 1);
    //assert(v->cols >= 0);
    return v->matrix[x];
}

// Device-side write of `value` into element x of vector v's backing storage.
// Same assumptions as device_vector_get; no bounds checking on x.
__device__ void device_vector_set(matrix_t* v, unsigned int x, float value) {
    //assert(v->rows == 1);
    //assert(v->cols >= 0);
    v->matrix[x] = value;
}
22,013
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>

// Element-wise sum of two row-major 10x10 int matrices: c = a + b.
// NOTE: the column count is hard-coded (col = 10) inside the kernel, so the
// launch configuration must provide a (col x row) 2D thread layout.
__global__ void add(int *a, int *b, int *c)
{
    int col = 10;
    int i = blockIdx.y * blockDim.y + threadIdx.y; // row index
    int j = blockIdx.x * blockDim.x + threadIdx.x; // column index
    *(c + i * col + j) = *(a + i * col + j) + *(b + i * col + j);
}

int main()
{
    int row = 10;
    int col = 10;
    int *a, *b, *c;          // host matrices
    int *d_a, *d_b, *d_c;    // device matrices

    a = (int *) malloc(row * col * sizeof(int));
    b = (int *) malloc(row * col * sizeof(int));
    c = (int *) malloc(row * col * sizeof(int));

    // Fill inputs with constants so the expected sum is 21 everywhere.
    int i, j;
    for (i = 0; i < row; i++) {
        for (j = 0; j < col; j++) {
            *(a + i * col + j) = 11;
            *(b + i * col + j) = 10;
        }
    }

    cudaMalloc(&d_a, row * col * sizeof(int));
    cudaMalloc(&d_b, row * col * sizeof(int));
    cudaMalloc(&d_c, row * col * sizeof(int));

    cudaMemcpy(d_a, a, row * col * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, row * col * sizeof(int), cudaMemcpyHostToDevice);

    // Bug fix: the kernel indexes threads in 2D (threadIdx.x AND threadIdx.y),
    // but was launched as <<<1,100>>> (a 1D block where threadIdx.y == 0),
    // which produced the right answer only by coincidence of the layout.
    // Launch a proper col x row 2D block instead.
    dim3 block(col, row);
    add<<<1, block>>>(d_a, d_b, d_c);

    cudaMemcpy(c, d_c, row * col * sizeof(int), cudaMemcpyDeviceToHost);

    for (i = 0; i < row; i++) {
        for (j = 0; j < col; j++) {
            printf(" c[%d][%d] = %d\n", i, j, *(c + i * col + j));
        }
    }

    // Bug fix: release device and host memory (was leaked).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
22,014
#include <stdio.h>
#include <stdlib.h>

// Abort with a diagnostic if a CUDA call did not return cudaSuccess.
#define CUDA_CHECK_ERROR(X)({\
    if((X) != cudaSuccess){\
        fprintf(stderr, "CUDA error %d (%s:%d): %s\n", (X), __FILE__, __LINE__, cudaGetErrorString((cudaError_t)(X)));\
        exit(1);\
    }\
})

// Abort with a diagnostic if a host allocation came back null/zero.
#define MALLOC_CHECK_ERROR(X)({\
    if ((X) == 0){\
        fprintf(stderr, "Malloc error (%s:%d): %i\n", __FILE__, __LINE__, (X));\
        exit(1);\
    }\
})

// Returns True if |a - b| <= eps
inline bool compare_float(float a, float b){
    const float eps = 1e-7f;
    const float diff = (a > b) ? (a - b) : (b - a);
    return diff <= eps;
}

// Initialise the vector v of n elements to random values
void init_vec(float *v, int n){
    for (int idx = 0; idx < n; ++idx)
        v[idx] = rand() % 100 * 0.3234f;
}

// kernel to perform vector addition: c[i] = a[i] + b[i], one thread per element
__global__ void vector_add(float *a, float *b, float *c, int n){
    unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= n)
        return; // surplus threads in the last block
    c[gid] = a[gid] + b[gid];
}

int main(void){
    const int n = 2000;

    // Host buffers.
    float *A = (float*) malloc(n * sizeof(float));
    float *B = (float*) malloc(n * sizeof(float));
    float *C = (float*) malloc(n * sizeof(float));
    MALLOC_CHECK_ERROR(A && B && C);
    init_vec(A, n);
    init_vec(B, n);

    // Device buffers.
    float *dev_A, *dev_B, *dev_C;
    CUDA_CHECK_ERROR(cudaMalloc(&dev_A, sizeof(float) * n));
    CUDA_CHECK_ERROR(cudaMalloc(&dev_B, sizeof(float) * n));
    CUDA_CHECK_ERROR(cudaMalloc(&dev_C, sizeof(float) * n));

    CUDA_CHECK_ERROR(cudaMemcpy(dev_A, A, sizeof(float) * n, cudaMemcpyHostToDevice));
    CUDA_CHECK_ERROR(cudaMemcpy(dev_B, B, sizeof(float) * n, cudaMemcpyHostToDevice));

    // Ceil-divide so every element gets a thread.
    const int nThreads = 1024;
    const int nBlocks = (n + nThreads - 1) / nThreads;
    vector_add<<<nBlocks, nThreads>>>(dev_A, dev_B, dev_C, n);
    CUDA_CHECK_ERROR((cudaError_t)cudaGetLastError());   // launch-config errors
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());           // async execution errors

    CUDA_CHECK_ERROR(cudaMemcpy(C, dev_C, sizeof(float) * n, cudaMemcpyDeviceToHost));
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());

    // Verify against a host-side sum; both sides compute a+b the same way.
    for (int idx = 0; idx < n; ++idx){
        if (!compare_float(C[idx], A[idx] + B[idx])){
            fprintf(stderr, "Sum is not correct.\n");
            cudaFree(dev_A);
            cudaFree(dev_B);
            cudaFree(dev_C);
            free(A);
            free(B);
            free(C);
            return 1;
        }
    }

    CUDA_CHECK_ERROR(cudaFree(dev_A));
    CUDA_CHECK_ERROR(cudaFree(dev_B));
    CUDA_CHECK_ERROR(cudaFree(dev_C));
    free(A);
    free(B);
    free(C);
    printf("All good.\n");
    return 0;
}
22,015
#define N 3
#include <stdio.h>

// c = a * b for row-major width x width int matrices, one thread per output cell.
__global__ void matrixMult (int *a, int *b, int *c, int width)
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (col < width && row < width) {
        int sum = 0;
        for (int i = 0; i < width; i++) {
            sum += a[row * width + i] * b[i * width + col];
        }
        // Bug fix: the store was outside the bounds guard (brace-less if), so
        // out-of-range threads would have written through c with garbage sum.
        c[row * width + col] = sum;
    }
}

int main()
{
    int a[N][N], b[N][N], c[N][N];
    int *dev_a, *dev_b, *dev_c;

    // a = 1..9 row-major, b = 9..1 row-major.
    int i = 1;
    for (int y = 0; y < N; y++)
        for (int x = 0; x < N; x++)
            a[y][x] = i++;
    i = 9;
    for (int y = 0; y < N; y++)
        for (int x = 0; x < N; x++)
            b[y][x] = i--;

    int size = N * N * sizeof(int);
    cudaMalloc((void **) &dev_a, size);
    cudaMalloc((void **) &dev_b, size);
    cudaMalloc((void **) &dev_c, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);

    // Single block of N x N threads (N must stay <= 32 for this layout).
    dim3 dimGrid(1, 1);
    dim3 dimBlock(N, N);
    matrixMult<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);

    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    for (int y = 0; y < N; y++) {
        for (int x = 0; x < N; x++) {
            printf("%d \t", c[y][x]);
        }
        printf("\n");
    }
    return 0;
}
22,016
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <cuda.h>

#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
#define BLOCK_SIZE 1024

// STRUCTS
typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

typedef struct {
    int x, y;        // width (x) and height (y) in pixels
    PPMPixel *data;  // row-major pixel array of x*y entries
} PPMImage;

// Wall-clock time in seconds (gettimeofday based).
double rtclock() {
    struct timezone Tzp;
    struct timeval Tp;
    int stat;
    stat = gettimeofday(&Tp, &Tzp);
    if (stat != 0)
        printf("Error return from gettimeofday: %d", stat);
    return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}

// Load a binary ("P6") PPM file with 8-bit components; exits on any format error.
static PPMImage *readPPM(const char *filename) {
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;

    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    img = (PPMImage *) malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    // Skip '#' comment lines between the magic number and the dimensions.
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n')
            ;
        c = getc(fp);
    }
    ungetc(c, fp);
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    while (fgetc(fp) != '\n')
        ;
    img->data = (PPMPixel *) malloc(img->x * img->y * sizeof(PPMPixel));
    // Bug fix: check the pixel-buffer allocation (the original re-checked img).
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    // Bug fix: fread returns size_t; compare against an unsigned row count.
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t) img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}

// Kernel: one thread per pixel. The pixel's RGB components (already quantized
// to 0..3 each by Histogram) select one of 64 bins, incremented atomically.
__global__ void calc_hist(PPMPixel *d_a, float *h, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        // bin = red*16 + green*4 + blue, range 0..63
        int idx_h = (d_a[idx].red * 16) + (d_a[idx].green * 4) + (d_a[idx].blue);
        atomicAdd(&h[idx_h], 1.0f);
    }
    // (dropped a useless trailing __syncthreads(): no shared state to publish)
}

// Quantize the image to 2 bits per channel, build the 64-bin histogram on the
// GPU, and normalize it by the pixel count into h (length 64, caller-zeroed).
void Histogram(PPMImage *image, float *h) {
    cudaEvent_t start, stop;
    cudaEvent_t startAlloc, stopAlloc;
    cudaEvent_t startMemCp, stopMemCp;
    cudaEvent_t startMemRec, stopMemRec;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&startAlloc);
    cudaEventCreate(&stopAlloc);
    cudaEventCreate(&startMemCp);
    cudaEventCreate(&stopMemCp);
    cudaEventCreate(&startMemRec);
    cudaEventCreate(&stopMemRec);

    int i;
    int n = image->y * image->x;  // pixel count (was a float; int is exact)

    // Quantize each 8-bit channel to 0..3 (integer division already floors).
    for (i = 0; i < n; i++) {
        image->data[i].red = (image->data[i].red * 4) / 256;
        image->data[i].blue = (image->data[i].blue * 4) / 256;
        image->data[i].green = (image->data[i].green * 4) / 256;
    }

    PPMPixel *d_a;
    float *d_h;
    cudaEventRecord(startAlloc);  // device-allocation timing
    if (cudaMalloc((void **) &d_a, sizeof(PPMPixel) * n) != cudaSuccess) {
        printf("error cudaMalloc buffer");
        exit(1);
    }
    if (cudaMalloc((void **) &d_h, sizeof(float) * 64) != cudaSuccess) {
        printf("error cudaMalloc hist");
        exit(1);
    }
    cudaEventRecord(stopAlloc);
    cudaEventSynchronize(stopAlloc);

    cudaEventRecord(startMemCp);  // host->device copy timing
    cudaMemcpy(d_a, image->data, sizeof(PPMPixel) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_h, h, sizeof(float) * 64, cudaMemcpyHostToDevice);
    cudaEventRecord(stopMemCp);
    cudaEventSynchronize(stopMemCp);

    cudaEventRecord(start);
    // One thread per pixel, BLOCK_SIZE (1024) threads per block, ceil-div blocks.
    calc_hist<<<(n + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(d_a, d_h, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    cudaEventRecord(startMemRec);  // device->host copy timing
    cudaMemcpy(h, d_h, sizeof(float) * 64, cudaMemcpyDeviceToHost);
    cudaEventRecord(stopMemRec);
    cudaEventSynchronize(stopMemRec);

    // Normalize bin counts to fractions of the total pixel count.
    for (i = 0; i < 64; i++)
        h[i] = h[i] / n;

    cudaFree(d_a);
    cudaFree(d_h);

    /* Optional metric printing (event elapsed times) — disabled:
    float millisecondsAlloc = 0;
    cudaEventElapsedTime(&millisecondsAlloc, startAlloc, stopAlloc);
    printf("Allocation Execution time: %f\n", millisecondsAlloc/1e3);
    float millisecondsMemCp = 0;
    cudaEventElapsedTime(&millisecondsMemCp, startMemCp, stopMemCp);
    printf("Memomry Send Execution time: %f\n", millisecondsMemCp/1e3);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Kernel Execution time: %f\n", milliseconds/1e3);
    float millisecondsRec = 0;
    cudaEventElapsedTime(&millisecondsRec, startMemRec, stopMemRec);
    printf("Copy Received Execution time: %f\n", milliseconds/1e3);
    float millisecondsTotal = millisecondsAlloc + millisecondsMemCp + milliseconds + millisecondsRec;
    printf("Total Execution time: %f\n", millisecondsTotal/1e3);
    */
}

int main(int argc, char *argv[]) {
    if (argc != 2) {
        printf("Too many or no one arguments supplied.\n");
        // Bug fix: previously fell through and dereferenced argv[1] anyway.
        return 1;
    }

    double t_start, t_end;
    int i;
    char *filename = argv[1];

    PPMImage *image = readPPM(filename);
    float *h = (float *) malloc(sizeof(float) * 64);

    // Zero the histogram before accumulation.
    for (i = 0; i < 64; i++)
        h[i] = 0.0;

    t_start = rtclock();
    Histogram(image, h);
    t_end = rtclock();

    for (i = 0; i < 64; i++) {
        printf("%0.3f ", h[i]);
    }
    printf("\n");
    //fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);

    // Bug fix: release the image as well as the histogram (was leaked).
    free(h);
    free(image->data);
    free(image);
    return 0;
}

/***********
RA: ra189918 - Andrius Sperque
Benchmark summary (arq1/arq2/arq3.ppm): serial 0.322/0.585/1.679 s vs
GPU total 0.0046/0.0111/0.0422 s => speedups ~69.7x / 52.9x / 39.8x.
***********/
22,017
#include "includes.h"

// Fills A_ch[c, j, i] = (rho[c, i] / dz[i]) * (K * s_a[c*nimages*3 + j]
//                                              - xx_or_yy[i] * s_b[c*nimages*3 + j])
// for i in [0, npix), j in [0, nimages), c in [0, nchannels).
// Launch with a 3D grid/block covering (npix, nimages, nchannels).
// NOTE(review): s_a/s_b are strided by nimages*3 per channel — presumably a
// 3-vector per image; confirm against the host-side layout.
__global__ void calculate_A_ch_1_2(float* rho, float* dz, float* s_a, float* xx_or_yy, float* s_b, float K, int npix, int nchannels, int nimages, float* A_ch)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.z * blockDim.z + threadIdx.z;
    // Bug fix: also bound the channel dimension — nchannels was passed in but
    // never used, so surplus z-threads wrote out of bounds.
    if (i < npix && j < nimages && c < nchannels) {
        A_ch[c * npix * nimages + j * npix + i] =
            (rho[c * npix + i] / dz[i]) * (K * s_a[c * nimages * 3 + j] - xx_or_yy[i] * s_b[c * nimages * 3 + j]);
    }
}
22,018
// Machine-generated by MyCudafy.CudafyMultiDimentionalArray — forward
// declarations for the kernels defined below.
extern "C" __global__ void LaplaceSolver( double* prev, int prevLen0, double* next, int nextLen0, int* sizes, int sizesLen0, int* extV, int extVLen0, int* intV, int intVLen0, double* w, int wLen0);
extern "C" __global__ void Copy( double* prev, int prevLen0, double* next, int nextLen0);
extern "C" __global__ void Square( double* prev, int prevLen0, double* next, int nextLen0, double* delta, int deltaLen0);
extern "C" __global__ void Delta( double* prev, int prevLen0, double* next, int nextLen0, double* delta, int deltaLen0);
extern "C" __global__ void Max( double* prev, int prevLen0, double* next, int nextLen0);
extern "C" __global__ void Sum( double* prev, int prevLen0, double* next, int nextLen0);

// Constant-memory buffers emitted by the generator; the *_Len0 macros mirror
// each array's declared length.
__constant__ double _a[100];
#define _aLen0 100
__constant__ double _b[1];
#define _bLen0 1
__constant__ int _sizes[2];
#define _sizesLen0 2
__constant__ double _lengths[2];
#define _lengthsLen0 2
__constant__ int _intV[3];
#define _intVLen0 3
__constant__ int _extV[3];
#define _extVLen0 3
__constant__ double _w[3];
#define _wLen0 3

// One weighted-Jacobi-style sweep over the interior points of a multi-dim grid.
// Grid-stride loop over intV[sizesLen0] interior points; each flat interior
// index i is decoded into a padded linear offset `num` using per-dimension
// sizes and strides extV, then next = w[last]*center + sum_j w[j]*(neighbors).
// NOTE(review): intV is indexed at position sizesLen0 — the generator stores
// the interior point count in the extra slot; verify against the host side.
extern "C" __global__ void LaplaceSolver( double* prev, int prevLen0, double* next, int nextLen0, int* sizes, int sizesLen0, int* extV, int extVLen0, int* intV, int intVLen0, double* w, int wLen0)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < intV[(sizesLen0)]; i += blockDim.x * gridDim.x)
    {
        // Decode flat interior index -> padded linear offset (skip boundary
        // layer: +1 per dimension; interior extent is sizes[j] - 2).
        int num = 0;
        int j = 0;
        int num2 = i;
        while (j < sizesLen0)
        {
            num += (1 + num2 % (sizes[(j)] - 2)) * extV[(j)];
            num2 /= sizes[(j)] - 2;
            j++;
        }
        // Weighted center plus weighted sum of the +/- neighbor pair per axis.
        double num3 = prev[(num)] * w[(sizesLen0)];
        for (j = 0; j < sizesLen0; j++)
        {
            num3 += (prev[(num - extV[(j)])] + prev[(num + extV[(j)])]) * w[(j)];
        }
        next[(num)] = num3;
    }
}

// Element-wise copy prev -> next (grid-stride).
extern "C" __global__ void Copy( double* prev, int prevLen0, double* next, int nextLen0)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < prevLen0; i += blockDim.x * gridDim.x)
    {
        next[(i)] = prev[(i)];
    }
}

// delta[i] = next[i]^2 (grid-stride).
// NOTE(review): reads `next`, not `prev` — presumably intended by the
// generator's calling convention; verify before reuse.
extern "C" __global__ void Square( double* prev, int prevLen0, double* next, int nextLen0, double* delta, int deltaLen0)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < prevLen0; i += blockDim.x * gridDim.x)
    {
        double num = next[(i)];
        num *= num;
        delta[(i)] = num;
    }
}

// delta[i] = (next[i] * (prev[i] - next[i]))^2 — squared, scaled per-element
// change between iterations (grid-stride).
extern "C" __global__ void Delta( double* prev, int prevLen0, double* next, int nextLen0, double* delta, int deltaLen0)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < prevLen0; i += blockDim.x * gridDim.x)
    {
        double num = next[(i)] * (prev[(i)] - next[(i)]);
        num *= num;
        delta[(i)] = num;
    }
}

// Strided partial max-reduction: next[i] = max over prev[i], prev[i+nextLen0], ...
// Assumes prev values are non-negative (accumulator starts at 0.0).
extern "C" __global__ void Max( double* prev, int prevLen0, double* next, int nextLen0)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < nextLen0; i += blockDim.x * gridDim.x)
    {
        next[(i)] = 0.0;
        int num = 0;
        while (num * nextLen0 + i < prevLen0)
        {
            int num2 = num * nextLen0 + i;
            if (prev[(num2)] > next[(i)])
            {
                next[(i)] = prev[(num2)];
            }
            num++;
        }
    }
}

// Strided partial sum-reduction: next[i] = sum over prev[i], prev[i+nextLen0], ...
extern "C" __global__ void Sum( double* prev, int prevLen0, double* next, int nextLen0)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < nextLen0; i += blockDim.x * gridDim.x)
    {
        next[(i)] = 0.0;
        int num = 0;
        while (num * nextLen0 + i < prevLen0)
        {
            int num2 = num * nextLen0 + i;
            next[(i)] += prev[(num2)];
            num++;
        }
    }
}
22,019
// From CUDA for Engineers
// Listing 6.1: parallel_dot/kernel.cu
#include <cuda_runtime.h>
#include <iostream>
#include <stdio.h>

#define TPB 64
#define ATOMIC 1 // 0 for non-atomic addition
#define N 1024

// Block-wise dot product: each block stages per-thread products in shared
// memory, thread 0 sums them and accumulates into *d_res.
__global__ void dotKernel(int *d_res, const int *d_a, const int *d_b, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int s_idx = threadIdx.x;

    __shared__ int s_prod[TPB];
    // Bug fix: the original early-returned when i >= n BEFORE __syncthreads(),
    // which (a) leaves some threads skipping the barrier in a partially-full
    // last block and (b) leaves their s_prod slots uninitialized while the
    // block sum still reads all blockDim.x entries. Pad the tail with 0 and
    // let every thread reach the barrier instead.
    s_prod[s_idx] = (i < n) ? d_a[i] * d_b[i] : 0;
    __syncthreads();

    // Thread 0 of each block reduces the staged products sequentially.
    if (s_idx == 0) {
        int blockSum = 0;
        for (int j = 0; j < blockDim.x; j++) {
            blockSum += s_prod[j];
        }
        printf("Block_%d, blockSum = %d\n", blockIdx.x, blockSum);
        if (ATOMIC) {
            atomicAdd(d_res, blockSum);
        } else {
            *d_res += blockSum; // deliberate race across blocks (demo only)
        }
    }
}

// Host wrapper: allocates device buffers, runs the kernel, copies the scalar
// result back into *res.
void dotLauncher(int *res, const int *a, const int *b, int n)
{
    int *d_res;
    int *d_a = 0;
    int *d_b = 0;
    cudaMalloc(&d_a, n * sizeof(int));
    cudaMalloc(&d_b, n * sizeof(int));
    cudaMalloc(&d_res, sizeof(int));

    // Bug fix: was sizeof(n) — same byte count only because n happens to be
    // int; say what we mean.
    cudaMemset(d_res, 0, sizeof(int));
    cudaMemcpy(d_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, n * sizeof(int), cudaMemcpyHostToDevice);

    dotKernel<<<(n + TPB - 1) / TPB, TPB>>>(d_res, d_a, d_b, n);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(res, d_res, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_res);
}

int main()
{
    std::cout << "parallel_dot: reduction\n";
    int cpu_res = 0;
    int gpu_res = 0;
    int *a = (int *) malloc(N * sizeof(int));
    int *b = (int *) malloc(N * sizeof(int));

    // initialize: all ones, so the dot product equals N
    for (int i = 0; i < N; i++) {
        a[i] = 1;
        b[i] = 1;
    }

    // cpu compute (reference)
    for (int i = 0; i < N; i++) {
        cpu_res += a[i] * b[i];
    }
    std::cout << "cpu result = " << cpu_res << "\n";

    // gpu compute
    dotLauncher(&gpu_res, a, b, N);
    std::cout << "gpu result = " << gpu_res << "\n";

    free(a);
    free(b);
    return 0;
}
22,020
//
// Created by songzeceng on 2020/11/8.
//
#include <stdio.h>

// Alignment demo globals: an 8-byte-aligned pair and a 16-byte-aligned triple.
struct __align__(8) {
    int x;
    int y;
} A;

struct __align__(16) {
    int x;
    int y;
    int z;
} B;

// Enumerate all CUDA devices and print each one's compute capability.
int main()
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);

    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printf("Device %d has compute capability %d.%d.\n", dev, props.major, props.minor);
    }
    return 0;
}
22,021
#include<stdio.h>
#define N 200

// c = a + b element-wise; intended for a single block with one thread per
// element (launch <<<1, N>>>).
__global__ void addvector(int* a, int* b, int* c)
{
    int x = threadIdx.x;
    if (x < N)
    {
        c[x] = a[x] + b[x];
    }
}

int main()
{
    int ch, arr[N], brr[N], result[N], *gpu1, *gpu2, *res;

    printf("Enter A Number Between 1 and 200 : ");
    // Robustness fix: the original left ch uninitialized on scanf failure.
    if (scanf("%d", &ch) != 1)
        ch = 0;
    ch = (ch > N) ? N : ch;
    if (ch < 0)        // robustness fix: clamp the lower bound too
        ch = 0;

    printf("Enter Numbers Of First Array : \n");
    int i;
    for (i = 0; i < ch; i++)
    {
        scanf("%d", &arr[i]);
    }
    printf("Enter Numbers Of Second Array : \n");
    for (i = 0; i < ch; i++)
    {
        scanf("%d", &brr[i]);
    }
    // Zero the unused tail so the full-N kernel adds harmless zeros.
    for (i = ch; i < N; i++)
    {
        arr[i] = 0;
        brr[i] = 0;
    }

    cudaMalloc((void**)&gpu1, N * sizeof(int));
    cudaMalloc((void**)&gpu2, N * sizeof(int));
    cudaMalloc((void**)&res, N * sizeof(int));
    cudaMemcpy(gpu1, arr, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu2, brr, N * sizeof(int), cudaMemcpyHostToDevice);

    addvector<<<1, N>>>(gpu1, gpu2, res);
    // Fix: cudaThreadSynchronize() is deprecated; use the current API.
    cudaDeviceSynchronize();

    cudaMemcpy(result, res, N * sizeof(int), cudaMemcpyDeviceToHost);

    printf("The Result Of Addiing The Array Elements Are : \n");
    for (i = 0; i < ch; i++)
    {
        printf("%d\t", result[i]);
    }
    printf("\n");

    cudaFree(gpu1);
    cudaFree(gpu2);
    cudaFree(res);
    return 0;
}
22,022
// CNN inference kernels (convolution, pooling, dense, activations).
//
// conv2D: each block handles one output channel; each thread handles one
// input pixel and decides one output pixel value.
//   Launch contract: gridDim.x == number of output channels,
//   blockDim == (InSize, InSize, 1), KernelSize == 5.
//   Assumed (from the indexing — confirm at the call sites):
//   InSize2 == InSize*InSize, OutSize == InSize - 4 (valid 5x5 conv),
//   OutSize2 == OutSize*OutSize, KernelSize2 == KernelSize*KernelSize.
// NOTE(review): bias[kernelPos] is added once per tap and per input
// channel (25*InChannels times per pixel); a conventional conv adds one
// bias per output channel. Kept as in the original — confirm the weight
// file layout before changing.
template <int InSize, int InChannels, int InSize2, int OutSize, int OutSize2, int KernelSize, int KernelSize2>
__global__ void conv2D(float* inImg, float* outImg, float* weight, float* bias) {

    __shared__ float sharedImg[InSize2];  // one input channel staged at a time

    const unsigned int tx = threadIdx.x;
    const unsigned int ty = threadIdx.y;
    const unsigned int pos = tx + InSize * ty;
    // Interior threads own an output pixel; border ("halo") threads only
    // help stage the tile.
    const bool interior = !(tx > OutSize + 1 || tx < 2 || ty > OutSize + 1 || ty < 2);

    float sum = 0.0f;

    #pragma unroll
    for (unsigned int ch = 0; ch < InChannels; ch++) {
        // First barrier: the previous iteration may still be reading the
        // tile. Second barrier: the tile must be complete before use.
        // Every thread reaches both (the border skip comes after them).
        __syncthreads();
        sharedImg[pos] = inImg[pos + InSize2 * ch];
        __syncthreads();

        if (!interior) { continue; }

        #pragma unroll 5
        for (unsigned int i = 0; i < 5; i++) {
            #pragma unroll 5
            for (unsigned int j = 0; j < 5; j++) {
                unsigned int kernelPos = i + KernelSize * j + KernelSize2 * ch;
                // Fixed: the original read sharedImg[tx-j + InSize*(ty-j)] —
                // the same loop variable on both axes, and an unsigned
                // underflow (out-of-bounds shared read) for taps left/above
                // the centre. Read the 5x5 window centred on (tx, ty).
                // NOTE(review): correlation orientation assumed; swap the
                // i/j offsets if the trained weights expect a flipped
                // (true convolution) window.
                sum += sharedImg[(tx - 2 + i) + InSize * (ty - 2 + j)] * weight[kernelPos]
                     + bias[kernelPos];
            }
        }
    }

    // Fixed: only interior threads store a result. The original stored from
    // every thread, so halo threads wrote through (tx-2)/(ty-2) after
    // unsigned underflow (out-of-bounds global write); it also offset the
    // channel by blockIdx.x instead of blockIdx.x * OutSize2 (the channel
    // stride maxpool below uses).
    if (interior) {
        outImg[(tx - 2) + (ty - 2) * OutSize + blockIdx.x * OutSize2] = sum;
    }
}

// 2x2 max pooling; one block per channel, one thread per output pixel.
// NOTE(review): the input is indexed with row stride OutSize and channel
// stride OutSize2 — the same strides as the (smaller) output. For a true
// 2x downsample the input strides should be twice as large; confirm what
// OutSize means at the call site before relying on this kernel. Logic kept
// exactly as in the original.
template<int OutSize, int OutSize2>
__global__ void maxpool(float* inImg, float* outImg) {
    const unsigned int tx = threadIdx.x;
    const unsigned int ty = threadIdx.y;
    const unsigned int tx2 = tx * 2;
    const unsigned int ty2 = ty * 2;
    const unsigned int ch = blockIdx.x * OutSize2;

    outImg[tx + OutSize * ty + ch] =
        fmaxf(
            fmaxf(inImg[tx2 + OutSize * ty2 + ch], inImg[(tx2+1) + OutSize * ty2 + ch]),
            fmaxf(inImg[tx2 + OutSize * (ty2+1) + ch], inImg[(tx2+1) + OutSize * (ty2+1) + ch])
        );
}

// Fully connected layer: output[bx] = dot(input, weights of output bx) + bias[bx].
// Launch contract: gridDim == (output size, 1, 1),
//                  blockDim == (input size, 1, 1) == (InSize, 1, 1).
// InSize must be a power of two for the tree reduction to cover every term.
// NOTE(review): the weight stride is gridDim.x (the output count), so for
// InSize > output count consecutive output columns overlap — confirm the
// weight layout (InSize * bx would be the conventional stride).
template <int InSize>
__global__ void dense(float* input, float* output, float* weight, float* bias) {
    const unsigned int tx = threadIdx.x;
    const unsigned int bx = blockIdx.x;
    const unsigned int bSize = gridDim.x;

    __shared__ float sharedOut[InSize];
    sharedOut[tx] = input[tx] * weight[tx + bSize * bx];
    __syncthreads();

    // Tree reduction. Fixed: the original used plain assignment
    // (sharedOut[tx] = sharedOut[tx + i]), which shifts values instead of
    // summing them, collapsing the dot product to a single term.
    for (unsigned int i = InSize / 2; i > 0; i >>= 1) {
        if (tx < i) {
            sharedOut[tx] += sharedOut[tx + i];
        }
        __syncthreads();
    }

    if (tx == 0) {
        output[bx] = sharedOut[0] + bias[bx];
    }
}

// Element-wise ReLU. Caller must launch exactly one thread per element —
// there is no bounds guard.
__global__ void relu(float* input, float* output) {
    const unsigned int id = blockDim.x * blockIdx.x + threadIdx.x;
    output[id] = fmaxf(0, input[id]);
}

// Softmax over one block of InSize threads (one thread per element).
// InSize must equal blockDim.x and be a power of two.
// NOTE(review): no max-subtraction, so large logits can overflow expf.
template <int InSize>
__global__ void softmax(float* input, float* output) {
    const unsigned int tx = threadIdx.x;
    const float exp = expf(input[tx]);

    __shared__ float sharedOut[InSize];
    sharedOut[tx] = exp;
    // Fixed: barrier added — without it the first reduction step could
    // read a neighbour's slot before that neighbour had written it.
    __syncthreads();

    // Sum of exponentials by tree reduction; the total ends in sharedOut[0].
    for (unsigned int i = InSize / 2; i > 0; i >>= 1) {
        if (tx < i) {
            sharedOut[tx] += sharedOut[tx + i];
        }
        __syncthreads();
    }
    output[tx] = exp / sharedOut[0];
}

// Fully connected layer fused with ReLU; same launch contract, same weight
// layout caveat, and the same reduction fix as dense.
template <int InSize>
__global__ void dense_relu(float* input, float* output, float* weight, float* bias) {
    const unsigned int tx = threadIdx.x;
    const unsigned int bx = blockIdx.x;
    const unsigned int bSize = gridDim.x;

    __shared__ float sharedOut[InSize];
    sharedOut[tx] = input[tx] * weight[tx + bSize * bx];
    __syncthreads();

    // Fixed: accumulate (+=) instead of assign (=).
    for (unsigned int i = InSize / 2; i > 0; i >>= 1) {
        if (tx < i) {
            sharedOut[tx] += sharedOut[tx + i];
        }
        __syncthreads();
    }

    if (tx == 0) {
        output[bx] = fmaxf(sharedOut[0] + bias[bx], 0);
    }
}

// Final fully connected layer fused with softmax.
// Launch contract: gridDim == (1, 1, 1); blockDim == (input size, 1, 1),
// e.g. 500. Two output channels are reduced per pass: the lower half of
// the block handles channel ch, the upper half channel ch+1.
// Assumed from the indexing (confirm at the call site):
// InSize2 == 2 * blockDim.x; weight[OutChannels*i + ch*blockDim.x] is the
// weight of input i for channel ch.
template <int OutChannels, int InSize2>
__global__ void dense_softmax(float* input, float* output, float* weight, float* bias) {
    const unsigned int tx = threadIdx.x;
    const unsigned int tSize = blockDim.x;
    const unsigned int tSizeh = tSize >> 1;
    const unsigned int weightPos = OutChannels * tx;
    const unsigned int channelPos = tx >= tSizeh;  // 0 = lower half, 1 = upper half

    __shared__ float sharedOut[InSize2];     // two channels of partial products
    __shared__ float sharedSum[OutChannels]; // exp(logit) per output channel

    #pragma unroll
    for (unsigned int ch = 0; ch < OutChannels; ch += 2) {
        sharedOut[tx] = input[tx] * weight[weightPos + ch * tSize];
        sharedOut[tx + tSize] = input[tx] * weight[weightPos + (ch+1) * tSize];
        __syncthreads();

        // Paired tree reduction over both halves of sharedOut, kept from
        // the original. NOTE(review): this assumes a power-of-two tSize and
        // stops while i > 4, relying on the channel partials landing in
        // sharedOut[0] and sharedOut[tSizeh]; verify against the actual
        // blockDim before reuse.
        unsigned int sift = 0;
        for (unsigned int i = tSize; i > 4; i >>= 1) {
            sift += channelPos ? (i >> 1) : 0;
            if (tx < i) {
                sharedOut[tx + sift] += sharedOut[tx + (i >> 1) + sift];
            }
            __syncthreads();
        }

        if (tx == 0 || tx == tSizeh) {
            // Fixed: the original wrote sharedSum[ch + 2*channelPos], which
            // skipped odd channels and collided with the next iteration's
            // even channel. Thread 0 owns channel ch and thread tSizeh owns
            // ch+1, matching the bias index.
            sharedSum[ch + channelPos] = expf(sharedOut[tx] + bias[ch + channelPos]);
        }
    }
    __syncthreads();

    // Softmax normalisation. Fixed: the original (a) let tx == OutChannels
    // read one element past sharedSum, (b) ran a tree reduction sized
    // tSize/2 over the OutChannels-element array (far out of bounds), with
    // barriers not reached by threads that had already returned, and (c)
    // divided by sharedOut[0] instead of the sum of exponentials.
    // OutChannels is small, so each surviving thread sums serially.
    if (tx >= OutChannels) return;
    const float e = sharedSum[tx];
    float denom = 0.0f;
    for (unsigned int c = 0; c < OutChannels; c++) {
        denom += sharedSum[c];
    }
    output[tx] = e / denom;
}
22,023
// // Created by ameen on 30/04/20. // #include "Data.cuh" Data::Data(std::string tableName) { joinObject = false; this->tableName = tableName; this->writeHappened = false; mdata = Metadata(tableName); chunkSize = ((500 * 1024 * 1024) / (mdata.rowSize)); // read 500 MB readCount = 0; f.open(utils::getDataFileName(tableName), std::ios::binary); o.open(utils::getTempFileName(tableName), std::ios::binary); } int Data::readRow(void *data) { f.read(static_cast<char *>(data), mdata.rowSize); return 0; } int Data::writeRow(void *data) { writeHappened = true; mdata.rowCount += 1; o.write(static_cast<const char *>(data), mdata.rowSize); return 0; } int Data::read (void *data){ if(readCount + chunkSize < mdata.rowCount){ f.read(static_cast<char *>(data), chunkSize * mdata.rowSize); readCount += chunkSize; return chunkSize; } else if (readCount < mdata.rowCount){ // check if the read works if(!f.is_open()) printf("File not open\n"); f.read(static_cast<char *>(data), (mdata.rowCount - readCount) * mdata.rowSize); int rowsRead = mdata.rowCount - readCount; readCount = mdata.rowCount; // utils::printMultiple(data, mdata.datatypes, mdata.rowSize, mdata.rowCount); // printf("____________________________________________("); return rowsRead; } else return -1; } void Data::restartRead(){ readCount = 0; f.seekg(0, std::ios::beg); } int Data::write(void *data, int numBytes){ writeHappened = true; if(joinObject) mdata.rowCount += numBytes/mdata.rowSize; if(!o.write((char *)data, numBytes)) return -1; else return numBytes; } Data::~Data() { //rename the temp file as data file if(joinObject){ remove(utils::getDataFileName(tableName).c_str()); return; } if(writeHappened){ // printf("inside destructor"); remove(utils::getDataFileName(tableName).c_str()); rename(utils::getTempFileName(tableName).c_str(), utils::getDataFileName(tableName).c_str()); } else remove(utils::getTempFileName(tableName).c_str()); } Data::Data(const std::string& t1, const std::string& t2) { joinObject = true; 
this->tableName = t1 + "_" + t2 + std::to_string(rand()) + ".join"; this->writeHappened = false; // TODO: Change mdata to a new metadata of join of both tables // create metadata for join table this->mdata = Metadata(t1); mdata.tableName = tableName; mdata.dataFileName = utils::getDataFileName(tableName); mdata.metadataFileName = utils::getMetadataFileName(tableName); Metadata m2 = Metadata(t2); for (int i=0; i<m2.columns.size(); i++){ mdata.append(m2.columns[i], m2.datatypes[i], m2.keyMap.find(m2.columns[i]) != m2.keyMap.end()); } mdata.rowCount = 0; // This should work if the above line is fixed this->chunkSize = ((20 * 1024) / mdata.rowSize); // read 20KB because we will need 20KB + 20KB + 20 * 20 MB total space while joining this->readCount = 0; // this->f = std::ifstream(utils::getDataFileName(this->tableName), std::ios::binary); this->o = std::ofstream(utils::getDataFileName(this->tableName), std::ios::binary); } Data::Data(Data *d1, Data *d2) { joinObject = true; this->tableName = d1->mdata.tableName + "_" + d1->mdata.tableName + std::to_string(rand()) + ".temp"; this->writeHappened = false; // TODO: Change mdata to a new metadata of join of both tables // create metadata for join table this->mdata = d1->mdata; mdata.tableName = tableName; mdata.dataFileName = utils::getDataFileName(tableName); mdata.metadataFileName = utils::getMetadataFileName(tableName); Metadata m2 = d2->mdata; for (int i=0; i<m2.columns.size(); i++){ mdata.append(m2.columns[i], m2.datatypes[i], m2.keyMap.find(m2.columns[i]) != m2.keyMap.end()); } mdata.rowCount = 0; // This should work if the above line is fixed this->chunkSize = ((20 * 1024) / mdata.rowSize); // read 20MB because we will need 20KB + 20KB + 20 * 20MB total space while joining this->readCount = 0; // this->f = std::ifstream(utils::getDataFileName(this->tableName), std::ios::binary); this->o = std::ofstream(utils::getDataFileName(this->tableName), std::ios::binary); } Data::Data(Data *d){ joinObject = true; writeHappened 
= false; tableName = d->tableName + std::to_string(rand()) + ".temp"; this->mdata = d->mdata; mdata.tableName = tableName; mdata.dataFileName = utils::getDataFileName(tableName); mdata.metadataFileName = utils::getMetadataFileName(tableName); mdata.rowCount = 0; // This should work if the above line is fixed this->chunkSize = ((500 * 1024 * 1024) / mdata.rowSize); this->readCount = 0; // this->f = std::ifstream(utils::getDataFileName(this->tableName), std::ios::binary); this->o = std::ofstream(utils::getDataFileName(this->tableName), std::ios::binary); } void Data::switchToRead(){ if(o.is_open()) o.close(); this->f = std::ifstream(utils::getDataFileName(this->tableName), std::ios::binary); f.seekg(0, std::ios::beg); }
22,024
//pass
//--blockDim=64 --gridDim=64 --no-inline
#include "cuda.h"

// Trivial verification kernel: compares two equal constants; the branch
// body is empty, so the kernel has no observable effect.
__global__ void foo() {
  const float a = 2.0f;
  const float b = 2.0f;
  if (a < b) {
  }
}
22,025
#include "includes.h"

// Scatters each (value, position) pair to the destination index given by
// cu_outputVals[gid]. 1-D launch, one thread per element; threads past
// numElems do nothing.
__global__ void scatter_kernel(unsigned int* d_inputVals, unsigned int* d_inputPos, unsigned int* d_outputVals, unsigned int* d_outputPos, unsigned int* cu_outputVals, size_t numElems)
{
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;

    if (gid < numElems)
    {
        const unsigned int dst = cu_outputVals[gid];
        d_outputVals[dst] = d_inputVals[gid];
        d_outputPos[dst]  = d_inputPos[gid];
    }

    // Barrier kept from the original; no shared memory is involved, so it
    // only aligns the block before exit.
    __syncthreads();
}
22,026
#include <iostream> #include <cstdlib> #include <cstdio> #include <curand_kernel.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/device_ptr.h> using namespace std; __global__ void setup_kernel (curandState * state, unsigned long seed ) { int i= blockDim.x * blockIdx.x + threadIdx.x; curand_init (seed, i, 0, &state[i]); } __global__ void randomColouring (curandState* globalState, int *degreeCount, int n, int limit){ int i= blockDim.x * blockIdx.x + threadIdx.x; curandState localState = globalState[i]; float RANDOM = curand_uniform( &localState ); globalState[i] = localState; RANDOM *= (limit - 1 + 0.999999); RANDOM += 1; degreeCount[i] = (int) RANDOM; } __global__ void conflictDetection (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m, int *detectConflict){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } int myColour = degreeCount[i]; int start = -1, stop = -1; start = vertexArray[i]; if (i==n-1){ stop = 2*m; } else{ stop = vertexArray[i+1]; } for (int j=start; j<stop; j++){ if (degreeCount[neighbourArray[j]-1] == myColour){ // detectConflict[i]=1; // break; if (i < neighbourArray[j]-1){ if (detectConflict[i]!=1){ detectConflict[i]=1; } } else if (detectConflict[neighbourArray[j]-1]!=1){ detectConflict[neighbourArray[j]-1]=1; } // if (detectConflict[i]!=1){ // detectConflict[i]=1; // } // // if (detectConflict[neighbourArray[j]-1]!=1){ // detectConflict[neighbourArray[j]-1]=1; // } } } } __global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } int start = -1, stop = -1; int diff=0; start = vertexArray[i]; if (i==n-1){ stop = 2*m; } else{ stop = vertexArray[i+1]; } diff = stop-start; degreeCount[i]=diff; } void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){ for (int i=0; i<n-1; i++){ for (int j = 
vertexArray[i]; j < vertexArray[i+1]; ++j){ cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl; /* code */ } } for (int j = vertexArray[n-1]; j < m; ++j) { cout<<"e "<<n<<" "<<neighbourArray[j]<<endl; /* code */ } } int main(int argc, char const *argv[]) { /* code */ string a, b; int n, m; cin>>a>>b>>n>>m; // cout<<a<<" "<<b<<" "<<n<<" "<<m<<endl; int *h_vertexArray = new int [n]; int *h_neighbourArray = new int [2*m]; int *h_degreeCount = new int [n]; int *h_detectConflict = new int [n]; int *d_vertexArray = NULL; cudaMalloc((void **)&d_vertexArray, n*sizeof(int)); int *d_neighbourArray = NULL; cudaMalloc((void **)&d_neighbourArray, 2*m*sizeof(int)); int *d_detectConflict = NULL; cudaMalloc((void **)&d_detectConflict, (n)*sizeof(int)); cudaMemset((void *)d_detectConflict, 0, (n)*sizeof(int)); int *d_degreeCount = NULL; cudaMalloc((void **)&d_degreeCount, (n)*sizeof(int)); cudaMemset((void *)d_degreeCount, 0, (n)*sizeof(int)); curandState* devStates; cudaMalloc ( &devStates, n*sizeof( curandState ) ); for (int i = 0; i < n; ++i) { /* code */ h_vertexArray[i]=2*m; } int offset = 0; int current = 0; int mark = 1; for (int i = 0; i < 2*m; ++i) { /* code */ int start; int end; cin>>start>>end; // Uncomment for SNAP graph datasets with nodes indexed from 0 to n-1 // cin>>start>>end; // start++; // end++; if (start!=mark){ if (start == mark+1 && h_vertexArray[mark-1]!=2*m){ } else{ for (int j = mark; j<start; j++){ h_vertexArray[j-1]=offset; // h_neighbourArray[offset]=0; // offset++; } } mark = start; } if (start==current){ h_neighbourArray[offset]=end; offset++; } else { current = start; h_vertexArray[current-1]=offset; h_neighbourArray[offset]=end; offset++; } } cudaMemcpy(d_vertexArray, h_vertexArray, n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), cudaMemcpyHostToDevice); int threadsPerBlock = 512; int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock; //cout<<threadsPerBlock<<" "<<blocksPerGrid<<endl; 
degreeCalc<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m); cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost); for (int i=0; i<n; i++){ cout<<h_degreeCount[i]<<endl; } thrust::device_ptr<int> d_ptr = thrust::device_pointer_cast(d_degreeCount); int max = *(thrust::max_element(d_ptr, d_ptr + n)); // int result = thrust::reduce(h_degreeCount, h_degreeCount + n, // -1, // thrust::maximum<int>()); // cout<<"Result: "<<result<<endl<<max; cout<<"Max = "<<max<<endl; setup_kernel <<<blocksPerGrid, threadsPerBlock>>> ( devStates, time(NULL) ); // Except for Cliques and Odd Cycles, Brook's theorem states that only Max Degree colours are enough at most randomColouring<<<blocksPerGrid, threadsPerBlock>>>(devStates, d_degreeCount, n, max); cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost); // for (int i=0; i<n; i++){ // cout<<h_degreeCount[i]<<endl; // } conflictDetection<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_detectConflict); thrust::device_ptr<int> d_detectConflict_ptr = thrust::device_pointer_cast(d_detectConflict); int count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n); cudaMemcpy(h_detectConflict, d_detectConflict, n*sizeof(int), cudaMemcpyDeviceToHost); // for (int i=0; i<n; i++){ // cout<<i+1<<": "<<h_detectConflict[i]<<endl; // } cout<<"Count: "<<count1<<endl; int countnew=0; for (int i=0; i<n-1; i++){ if (h_detectConflict[i]==0){ continue; } countnew++; bool usedColours[max+1]; fill(usedColours, usedColours+max+1, false); // if (flag){ // flag = false; // for (int j=0; j<n; j++){ // cout<<usedColours[i]<<endl; // } // } int start = -1, stop = -1; start = h_vertexArray[i]; stop = h_vertexArray[i+1]; // cout<<"My id: "<<i<<endl; // // cout<<"My colour: "<<h_degreeCount[i]<<endl; // // cout<<"Neighbours"<<endl; // for (int j=start; j<stop; j++){ // cout<<h_degreeCount[h_neighbourArray[j]-1]<<" "; 
usedColours[h_degreeCount[h_neighbourArray[j]-1]-1] = true; } // cout<<endl; for (int j=0; j<max+1; j++){ if (usedColours[j]==false){ h_degreeCount[i]=j+1; // cout<<"My new Colour: "<<j+1<<endl; break; } } } if (h_detectConflict[n-1]!=0){ bool usedColours[max+1]; countnew++; fill(usedColours, usedColours+max+1, false); int start = -1, stop = -1; start = h_vertexArray[n-1]; stop = 2*m; for (int j=start; j<stop; j++){ usedColours[h_degreeCount[h_neighbourArray[j]-1]-1] = true; } for (int j=0; j<max+1; j++){ if (usedColours[j]==false){ h_degreeCount[n-1]=j+1; break; } } } // cout<<"SHAMILASADJKAJSDKLJASHDKJASHLDKASJKD"; // for (int i=0; i<n; i++){ // cout<<h_degreeCount[i]<<endl; // } // for (int i=0; i<n-1; i++){ // // int start = -1, stop = -1; // // start = h_vertexArray[i]; // // stop = h_vertexArray[i+1]; // // cout<<"My id: "<<i<<endl; // // cout<<"My colour: "<<h_degreeCount[i]<<endl; // // cout<<"Neighbours"<<endl; // // for (int j=start; j<stop; j++){ // cout<<h_degreeCount[h_neighbourArray[j]-1]<<" "; // } // } // // // // if (h_detectConflict[n-1]!=0){ // int start = -1, stop = -1; // // start = h_vertexArray[n-1]; // // stop = m; // // cout<<"My id: "<<n-1<<endl; // // cout<<"My colour: "<<h_degreeCount[n-1]<<endl; // // cout<<"Neighbours"<<endl; // // for (int j=start; j<stop; j++){ // cout<<h_degreeCount[h_neighbourArray[j]-1]<<" "; // } // } cout<<"Shamil"<<endl; cudaMemset((void *)d_detectConflict, 0, (n)*sizeof(int)); cudaMemcpy(d_degreeCount, h_degreeCount, n*sizeof(int), cudaMemcpyHostToDevice); conflictDetection<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_detectConflict); count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n); cout<<"Count: "<<count1<<" "<<countnew<<endl; // for (int i=0; i<n; i++){ // if (h_degreeCount[i] == max+1){ // cout<<"BUHAHAHAHAHAHHAHAHAHHAHA"<<endl; // } // // else if (h_degreeCount[i] == 1){ // cout<<"LALLLALALALALALALALALLALA"<<endl; // } // 
cout<<h_degreeCount[i]<<endl; // } // for (int i=0; i<n; i++){ // cout<<i+1<<": "<<h_detectConflict[i]<<endl; // } //edgesPrint(h_vertexArray, h_neighbourArray, n, m); //delete[] h_vertexArray; //delete[] h_neighbourArray; //delete[] h_degreeCount; delete[] h_vertexArray; delete[] h_neighbourArray; delete[] h_degreeCount; delete[] h_detectConflict; cudaFree(d_neighbourArray); cudaFree(d_vertexArray); cudaFree(d_degreeCount); cudaFree(d_detectConflict); cudaDeviceReset(); return 0; }
22,027
/* KAM PUI SO (ANTHONY) CS 510 GPU Device Properity Check */ #include <sys/time.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #define WIDTH 1024 #define HEIGHT 1024 #define MASK_WIDTH 127 #define MASK_HEIGHT 127 #define LOOPMAX 20 #define TILESIZE 32 #define RANGE 10 #define ROW 0 #define COL 1 // global const const int DIAGMASK[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1}; const int VERTMASK[9] = {1, 0, 0, 1, 0, 0, 1, 0, 0}; // ->>> __constant__ int MASK[MASK_WIDTH * MASK_HEIGHT]; // The kernel that will execute on the GPU // ->>> __global__ void basic_2d_kernel(int *start, int *result, int width, int height, int mask_width, int mask_height) { __global__ void basic_2d_kernel(int *start, int *mask, int *result, int width, int height, int mask_width, int mask_height) { // declare kernel variable int center_x = blockDim.x * blockIdx.x + threadIdx.x; int center_y = blockDim.y * blockIdx.y + threadIdx.y; int current_x, current_y; int n_x_start_point = center_x - (mask_width / 2); int n_y_start_point = center_y - (mask_height / 2); int pvalue = 0; // loop thru the mask area for one location for (int y = 0; y < mask_height; y++) { current_y = (n_y_start_point + y + height) % height; if ((current_y >= 0) && (current_y < height)) { for (int x = 0; x < mask_width; x++) { current_x = (n_x_start_point + x + width) % width; if ((current_x >= 0) && (current_x < width)) { pvalue += start[(current_y * width) + current_x] * mask[(y * mask_width) + x]; // only for non-const memory version // ->>> pvalue += start[(current_y * width) + current_x] * MASK[(y * mask_width) + x]; } } } } result[(center_y * width) + center_x] = pvalue; } // This function encapsulates the process of creating and tearing down the // environment used to execute our game of life iteration kernel. The steps of the // process are: // 1. Allocate memory on the device to hold our board vectors // 2. Copy the board vectors to device memory // 3. Execute the kernel // 4. 
Retrieve the result board vector from the device by copying it to the host // 5. Free memory on the device // void basic_2d_dev(int *start, int *mask, int *result, int width, int height, int mask_width, int mask_height) { // Step 1: Allocate memory int *mask_dev; // only for non-const memory version int *start_dev, *result_dev; int n = width * height; int m = mask_width * mask_height; // Since cudaMalloc does not return a pointer like C's traditional malloc // (it returns a success status instead), we provide as it's first argument // the address of our device pointer variable so that it can change the // value of our pointer to the correct device address. cudaMalloc((void **) &start_dev, sizeof(int) * n); cudaMalloc((void **) &result_dev, sizeof(int) * n); cudaMalloc((void **) &mask_dev, sizeof(int) * m); // only for non-const memory version // Step 2: Copy the input vectors to the device cudaMemcpy(start_dev, start, sizeof(int) * n, cudaMemcpyHostToDevice); cudaMemcpy(mask_dev, mask, sizeof(int) * m, cudaMemcpyHostToDevice); // only for non-const memory version // ->>> cudaMemcpyToSymbol(MASK, mask, sizeof(int) * m); // only for const memory version // Step 3: Invoke the kernel dim3 dimGrid(TILESIZE, TILESIZE, 1); dim3 dimBlock(ceil(width/ (float) TILESIZE), ceil(height/ (float) TILESIZE), 1); basic_2d_kernel<<<dimGrid, dimBlock>>>(start_dev, mask_dev, result_dev, width, height, mask_width, mask_height); // ->>> basic_2d_kernel<<<dimGrid, dimBlock>>>(start_dev, result_dev, width, height, mask_width, mask_height); // only for const memory version // Step 4: Retrieve the results cudaMemcpy(result, result_dev, sizeof(int) * n, cudaMemcpyDeviceToHost); // Step 5: Free device memory cudaFree(start_dev); cudaFree(result_dev); cudaFree(mask_dev); // only for non-const memory version } // The old-fashioned CPU-only way (for validation) void basic_2d_host(int *start, int *mask, int *result, int width, int height, int mask_width, int mask_height) { int x; int y; int m_x; 
int m_y; int n_x; int n_y; int offset_x = mask_width / 2; int offset_y = mask_height / 2; int pvalue = 0; for (y = 0; y < height; ++y) { for (x = 0; x < width; ++x) { pvalue = 0; for (m_y = 0; m_y < mask_height; m_y++) { for (m_x = 0; m_x < mask_width; m_x++) { n_x = (x + m_x - (offset_x) + width) % width; n_y = (y + m_y - (offset_y) + height) % height; pvalue += (start[n_y * width + n_x] * mask[m_y * mask_width + m_x]); } } result[y * width + x] = pvalue; } } } // fill the image with random values within the range void fill_image(int *image, int width, int height, int range) { int i; for (i = 0; i < (width * height); i++) image[i] = rand() % range; } // fill the mask with pattern values void fill_pattern(int *image, int width, int height, int scale) { int i; for (i = 0; i < (width * height); i++) { if (i % (width / 2)) image[i] = 0; else image[i] = rand() % scale; } } // print divider void print_divider() { printf("---------------------------------------\n"); } // print image as a width x height matrix void print_image(int *image, int width, int height) { int x, y; for (y = 0; y<height; y++) { for (x = 0; x<width; x++) { printf("%d ", image[y * width + x]); } printf("\n"); } print_divider(); } // normalize mask image (for easy printing with large array) void normalize_image(int *image, int width, int height, int scale) { int i; int max = image[0]; // find max and min for (i = 0; i < (width * height); i++) { if (image[i] > max) max = image[i]; } for (i = 0; i < (width * height); i++) { image[i] = (int) ((float) image[i] / (float) max * (float) (scale -1)) ; } } // show device capability void device_check() { int deviceCount; int device; cudaGetDeviceCount(&deviceCount); for (device = 0; device < deviceCount; ++device) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); print_divider(); printf("Device %d has compute capability %d.%d\n", device, deviceProp.major, deviceProp.minor); printf("Max Threads per Block: %d \n", 
deviceProp.maxThreadsPerBlock); printf("Max Threads for x direction per Block: %d \n", deviceProp.maxThreadsDim[0]); printf("Max Threads for y direction per Block: %d \n", deviceProp.maxThreadsDim[1]); printf("Max Threads for z direction per Block: %d \n", deviceProp.maxThreadsDim[2]); printf("Max Blocks for x direction per Grid: %d \n", deviceProp.maxGridSize[0]); printf("Max Blocks for y direction per Grid: %d \n", deviceProp.maxGridSize[1]); printf("Max Blocks for z direction per Grid: %d \n", deviceProp.maxGridSize[2]); printf("Max Warp Size: %d \n", deviceProp.warpSize); printf("Number of SM: %d \n", deviceProp.multiProcessorCount); printf("Max Threads per SM: %d \n", deviceProp.maxThreadsPerMultiProcessor); printf("Number of Registers in each SM: %d \n", deviceProp.regsPerBlock); printf("Amount of Shared Memory Available: %zd \n", deviceProp.sharedMemPerBlock); printf("Amount of Constant Memory Available: %zd \n", deviceProp.totalConstMem); printf("Amount of Global Memory Available: %zd \n", deviceProp.totalGlobalMem); printf("Clock Rate: %d \n", deviceProp.clockRate); print_divider(); } } // print different of two times in nano seconds void print_time(timeval begin, timeval end) { printf("Time = %ld us\n", ((end.tv_sec * 1000000 + end.tv_usec) - (begin.tv_sec * 1000000 + begin.tv_usec ))); } // main convolution setup and tear down logics void convolution(int width, int height, int mask_width, int mask_height) { // image variable int n = width * height; int *start = (int *) malloc(n* sizeof(int)); int *result = (int *) malloc(n * sizeof(int)); // mask variable int m = mask_width * mask_height; int *mask = (int *) malloc(m * sizeof(int)); // int *mask = (int *) VERTMASK; // static mask with vertical 1's // int *mask = (int *) DIAGMASK; // static mask with diagonal 1's // time variable struct timeval begin, end; // initialize the mask image and global image // print_divider(); fill_image(mask, mask_width, mask_height, RANGE); // print_image(mask, mask_width, 
mask_height); fill_image(start, width, height, RANGE); // fill_pattern(start, width, height, RANGE); // print_image(start, width, height); // run 2d convulotion with timer and print result gettimeofday(&begin, NULL); // basic_2d_host(start, mask, result, width, height, mask_width, mask_height); // host only code for validation basic_2d_dev(start, mask, result, width, height, mask_width, mask_height); gettimeofday(&end, NULL); // print_image(result, width, height); print_time(begin, end); // print_divider(); // free memory free(start); free(result); free(mask); } // main function int main(void) { // variables // initialize rand seed // srand(time(NULL)); // check device property (warm up device...) device_check(); // test convolution // convolution(WIDTH, HEIGHT, MASK_WIDTH, MASK_HEIGHT); return 0; }
22,028
#include <iostream>
#include <array>
#include <cuda_runtime.h>

// a += b, element-wise; one thread per element with a tail guard.
// (Currently unused by main; kept as part of the file's interface.)
__global__ void saxpy(float* a_ptr, float* b_ptr, int N)
{
    const int thread_index = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread_index >= N) {
        return;
    }
    a_ptr[thread_index] = a_ptr[thread_index] + b_ptr[thread_index];
}

// Per-block tree reduction over dynamic shared memory (blockDim.x floats);
// each block overwrites a_dev_ptr[blockIdx.x] with its partial sum.
__global__ void reduce(float* a_dev_ptr)
{
    extern __shared__ float shared_data[];
    const unsigned int tid = threadIdx.x;
    const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;

    shared_data[tid] = a_dev_ptr[i];
    __syncthreads();

    for (unsigned int s = 1; s < blockDim.x; s *= 2) {
        // Fixed: the "tid + s < blockDim.x" bound is required when
        // blockDim.x is not a power of two (here 100); without it the
        // step s == 4 already reads shared_data[100], one past the
        // 100-float dynamic allocation. Straggler partials (e.g. slot 96)
        // are still folded in by lower-index threads at larger strides.
        if (tid % (2*s) == 0 && tid + s < blockDim.x) {
            shared_data[tid] += shared_data[tid + s];
        }
        __syncthreads();
    }

    if (tid == 0) {
        a_dev_ptr[blockIdx.x] = shared_data[0];
    }
}

int main() {
    std::cout << "Hello, World!" << std::endl;

    constexpr int N = 1000;
    std::array<float, N> a = {};
    std::array<float, N> b = {};
    for (int i = 0; i < N; i++) {
        a.at(i) = static_cast<float>(i);
        b.at(i) = 42.0f;
    }
    // (b is initialised for the saxpy example above; only a is uploaded
    // and reduced. The original's never-used b_dev_ptr was removed.)

    void* a_dev_ptr = nullptr;
    cudaMalloc(&a_dev_ptr, sizeof(a));
    cudaMemcpy(a_dev_ptr, a.data(), sizeof(a), cudaMemcpyHostToDevice);

    // 10 blocks of 100 threads; each block takes 100 floats of dynamic
    // shared memory and leaves its partial sum in a[blockIdx.x].
    reduce<<<10, 100, sizeof(float)*100>>>(reinterpret_cast<float*>(a_dev_ptr));

    cudaMemcpy(a.data(), a_dev_ptr, sizeof(a), cudaMemcpyDeviceToHost);
    cudaFree(a_dev_ptr);  // fixed: the original leaked this allocation

    for (const auto e: a) {
        std::cout << e << '\n';
    }

    // Host-side pass over the 10 block sums (expected 0+1+...+999 = 499500).
    float sum = 0;
    for (int i=0; i < 10; i++) {
        sum += a.at(i);
    }
    std::cout << "Total sum: " << sum << '\n';
    return 0;
}
22,029
/*******************************************************************************
* PROGRAM: canny_edge_detector
* FILE: gaussian_smooth.cu
* PURPOSE: Apply Gaussian Smooth to input pgm image
* NAME: Vuong Pham-Duy
* Faculty of Computer Science and Technology
* Ho Chi Minh University of Technology, Viet Nam
* vuongpd95@gmail.com
* DATE: 11/10/2016
*******************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#define _USE_MATH_DEFINES
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

#define VERBOSE 1
#define BOOSTBLURFACTOR 90.0

/****************************************************************************
* Functions used for debugging
****************************************************************************/
// Error-check wrapper for CUDA runtime calls. Only the prototype of
// gpuAssert appears in this file; its definition is expected elsewhere in
// the project.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true);
void make_gaussian_kernel(float sigma, float **kernel, int *windowsize);
// Separable Gaussian blur passes: x-direction (uchar image -> float temp
// buffer) and y-direction (float temp buffer -> short smoothed image).
// Both are 1-D launches with one thread per pixel; `blockSize` must equal
// blockDim.x because it is used to rebuild the flat pixel index. `sigma`
// is unused inside the kernels (the weights are precomputed on the host).
__global__ void gblur_xdir_kernel(int rows, int cols, int blockSize,
    float sigma, int windowsize, float *d_kernel, float *d_tempim,
    unsigned char *d_image);
__global__ void gblur_ydir_kernel(int rows, int cols, int blockSize,
    float sigma, int windowsize, float *d_kernel, float *d_tempim,
    short int *d_smoothedim);

/*******************************************************************************
* PROCEDURE: gaussian_smooth
* PURPOSE: Blur an image with a gaussian filter.
* NAME: Vuong Pham-duy
* DATE: 10/11/2016
*
* Builds a 1-D Gaussian kernel on the host, uploads it, then runs the two
* separable blur passes. *d_image is the device-resident input and
* *d_smoothedim the caller-allocated device output.
* NOTE(review): no cudaGetLastError()/synchronize after the launches, so
* launch failures only surface at a later CUDA call.
*******************************************************************************/
void gaussian_smooth(int rows, int cols, float sigma, int blockSize,
    int gridSize, short int **d_smoothedim, unsigned char **d_image)
{
    int windowsize;/* Dimension of the gaussian kernel. */
    float *h_kernel, *d_kernel;/* A one dimensional gaussian kernel in host/device. */
    float *d_tempim;/* Buffer for separable filter gaussian smoothing. */

    /****************************************************************************
    * Create a 1-dimensional gaussian smoothing kernel.
    ****************************************************************************/
    if (VERBOSE) printf("Computing the gaussian smoothing kernel.\n");
    make_gaussian_kernel(sigma, &h_kernel, &windowsize);

    /****************************************************************************
    * Allocate memory for kernel, the tmp buffer image and the smoothed image
    ****************************************************************************/
    gpuErrchk(cudaMalloc((void**)&d_tempim, rows * cols * sizeof(float)));
    gpuErrchk(cudaMalloc((void**)&d_kernel, windowsize * sizeof(float)));
    gpuErrchk(cudaMemcpy((void*)d_kernel, (void*)h_kernel,
        windowsize * sizeof(float), cudaMemcpyHostToDevice));

    if (VERBOSE) printf("Smoothing the image using a gaussian kernel.\n");
    // x pass writes d_tempim; y pass consumes it. Launches on the same
    // (default) stream, so they are ordered.
    gblur_xdir_kernel<<<gridSize, blockSize>>>(rows, cols, blockSize, sigma,
        windowsize, d_kernel, d_tempim, (*d_image));
    gblur_ydir_kernel<<<gridSize, blockSize>>>(rows, cols, blockSize, sigma,
        windowsize, d_kernel, d_tempim, (*d_smoothedim));

    gpuErrchk(cudaFree(d_kernel));
    gpuErrchk(cudaFree(d_tempim));
    free(h_kernel);
}

// Horizontal (x-direction) pass: 1-D convolution of each row with the
// Gaussian kernel, renormalizing by the in-bounds weight sum at the image
// borders.
__global__ void gblur_xdir_kernel(int rows, int cols, int blockSize,
    float sigma, int windowsize, float *d_kernel, float *d_tempim,
    unsigned char *d_image)
{
    /* This thread process the number img_idx element of image */
    int img_idx = blockIdx.x * blockSize + threadIdx.x;
    if (img_idx >= (rows * cols)) return;
    int r = img_idx / cols; /* row position of the pixel, range [0, rows - 1] */
    int c = img_idx - r * cols;/* col position of the pixel, range [0, cols - 1] */
    int center = windowsize / 2;/* Half of the windowsize. */

    /****************************************************************************
    * Gaussian smooth in x direction
    ****************************************************************************/
    int counter; /* gaussian kernel counter */
    float dot = 0.0;
    float sum = 0.0;    // sum of the weights actually applied (border handling)
    for (counter = (-center); counter <= center; counter++){
        if (((c + counter) >= 0) && ((c + counter) < cols)){
            dot += (float)d_image[img_idx + counter] * d_kernel[center + counter];
            sum += d_kernel[center + counter];
        }
    }
    d_tempim[img_idx] = dot / sum;
}

// Vertical (y-direction) pass: 1-D convolution of each column of the temp
// buffer, scaled by BOOSTBLURFACTOR and rounded to a short.
__global__ void gblur_ydir_kernel(int rows, int cols, int blockSize,
    float sigma, int windowsize, float *d_kernel, float *d_tempim,
    short int *d_smoothedim)
{
    // This thread process the number img_idx element of image
    int img_idx = blockIdx.x * blockSize + threadIdx.x;
    if (img_idx >= (rows * cols)) return;
    int r = img_idx / cols;/* row position of the pixel, range [0, rows - 1] */
    int c = img_idx - r * cols;/* col position of the pixel, range [0, cols - 1] */
    int center = windowsize / 2;/* Half of the windowsize. */

    /****************************************************************************
    * Gaussian smooth in y direction
    ****************************************************************************/
    int rr;
    float dot = 0.0;
    float sum = 0.0;    // sum of the weights actually applied (border handling)
    for (rr = (-center); rr <= center; rr++){
        if (((r + rr) >= 0) && ((r + rr) < rows)){
            dot += d_tempim[(r + rr)*cols + c] * d_kernel[center + rr];
            sum += d_kernel[center + rr];
        }
    }
    // + 0.5 rounds to nearest before the truncating cast.
    d_smoothedim[img_idx] = (short int)(dot*BOOSTBLURFACTOR / sum + 0.5);
}

/*******************************************************************************
* PROCEDURE: make_gaussian_kernel
* PURPOSE: Create a one dimensional gaussian kernel.
* NAME: Mike Heath
* DATE: 2/15/96
*
* Allocates *kernel (caller frees) with *windowsize = 1 + 2*ceil(2.5*sigma)
* coefficients, normalized to sum to 1. The constants 2.71828 / 6.2831853
* approximate e and 2*pi.
*******************************************************************************/
void make_gaussian_kernel(float sigma, float **kernel, int *windowsize)
{
    int i, center;
    float x, fx, sum = 0.0;

    *windowsize = 1 + 2 * ceil(2.5 * sigma);
    center = (*windowsize) / 2;

    if (VERBOSE) printf(" The kernel has %d elements.\n", *windowsize);
    if ((*kernel = (float *)calloc((*windowsize), sizeof(float))) == NULL){
        fprintf(stderr, "Error callocing the gaussian kernel array.\n");
        exit(1);
    }

    for (i = 0; i<(*windowsize); i++){
        x = (float)(i - center);
        fx = pow(2.71828, -0.5*x*x / (sigma*sigma)) / (sigma * sqrt(6.2831853));
        (*kernel)[i] = fx;
        sum += fx;
    }

    // Normalize so the coefficients sum to 1.
    for (i = 0; i<(*windowsize); i++) (*kernel)[i] /= sum;

    if (VERBOSE){
        printf("The filter coefficients are:\n");
        for (i = 0; i<(*windowsize); i++) printf("kernel[%d] = %f\n", i, (*kernel)[i]);
    }
}
22,030
/* 3x3 pixel neighbourhood around a center pixel p22; p<row><col> with
 * row 1 = y-1 (above) and row 3 = y+1 (below). */
template <typename T>
struct Neighbours3x3 {
    T p11;
    T p12;
    T p13;
    T p21;
    T p22;
    T p23;
    T p31;
    T p32;
    T p33;
};

/* Fetch pixel (x, y) from a row-major width*height image.
 * Out-of-range accesses are clamped on the *linear* index: anything before
 * the buffer yields a zero pixel, anything past the end yields the last
 * element.  NOTE(review): clamping the flattened index means x = -1 silently
 * reads the last pixel of the previous row rather than a true edge pixel —
 * presumably acceptable for this filter, but confirm.
 * NOTE(review): make_uchar1(0) compiles only when T is uchar1; the template
 * is effectively specialised to that type. */
template <typename T>
__device__ T d_getPixel(const T* const src, int x, int y, int width, int height){
    int colaced_loc = (x + y*width);
    if (colaced_loc < 0) {
        return make_uchar1(0);
    }
    /* BUG FIX: the original tested `> width*height` and returned
     * src[width*height], which reads one element PAST the end of the buffer
     * (and the == width*height case fell through to the same OOB read).
     * Valid indices are [0, width*height - 1]. */
    if (colaced_loc >= width*height){
        return src[width*height - 1];
    }
    return src[colaced_loc];
}

/* Load the full 3x3 neighbourhood centered at (x, y) into `neighbours`,
 * using d_getPixel's clamping for border pixels. */
template <typename T>
__device__ void d_getNeighbours_8(
    const T* const src,
    int width,
    int height,
    int x,
    int y,
    Neighbours3x3<T> &neighbours
){
    neighbours.p11 = d_getPixel(src, x-1, y-1, width, height);
    neighbours.p12 = d_getPixel(src, x,   y-1, width, height);
    neighbours.p13 = d_getPixel(src, x+1, y-1, width, height);
    neighbours.p21 = d_getPixel(src, x-1, y,   width, height);
    neighbours.p22 = d_getPixel(src, x,   y,   width, height);
    neighbours.p23 = d_getPixel(src, x+1, y,   width, height);
    neighbours.p31 = d_getPixel(src, x-1, y+1, width, height);
    neighbours.p32 = d_getPixel(src, x,   y+1, width, height);
    neighbours.p33 = d_getPixel(src, x+1, y+1, width, height);
}

/* Mean of all nine neighbourhood samples (a 3x3 box filter — despite the
 * "_8" name the center pixel is included and the divisor is 9).
 * NOTE(review): the double result is truncated into a (possibly signed)
 * char on return; values above 127 may wrap on platforms where char is
 * signed — confirm callers expect unsigned-byte semantics. */
template <typename T>
__device__ char d_avgNeighbours_8(
    Neighbours3x3<T> &neighbours
) {
    return (
        neighbours.p11.x + neighbours.p12.x + neighbours.p13.x +
        neighbours.p21.x + neighbours.p22.x + neighbours.p23.x +
        neighbours.p31.x + neighbours.p32.x + neighbours.p33.x
    )/9.0;
}
22,031
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <cuda.h>
#include <string.h>
#include <math.h>

#define X 1024
#define Y 1024
#define BX 4
#define BY 4

/* Wall-clock microseconds between two gettimeofday() samples. */
#define USEC_ELAPSED(start,end) ((end.tv_sec - start.tv_sec) * 1000 * 1000 + (end.tv_usec - start.tv_usec))

/* Evaluate a CUDA runtime call; on failure print file line + error string
 * and abort.  BUG FIX: compare against the runtime-API `cudaSuccess`, not
 * the driver-API `CUDA_SUCCESS` enum (same value, wrong API). */
#define CHECK_RET(x) \
    ret = x; \
    if (ret != cudaSuccess) { \
        cudaError_t err = cudaGetLastError(); \
        printf("cuda function failure at line %d :%s \n",__LINE__,cudaGetErrorString(err)); \
        exit(1); }

/* Print an x-by-y row-major matrix (debug helper, enabled via __DEBUG). */
void print_matrix(float *buff, int x, int y)
{
    printf("\n\n");
    for (int i = 0; i < x; i++) {
        for (int j = 0; j < y; j++)
            printf("%20.3f\t", buff[i*y + j]);
        printf("\n");
    }
}

/* Compare two x-by-y matrices element-wise; report and exit(1) on the first
 * element whose absolute difference exceeds the tolerance. */
void check_result(float *buf1, float *buf2, int x, int y)
{
    for (int i = 0; i < x; i++) {
        for (int j = 0; j < y; j++)
            /* BUG FIX: the original used integer `abs` on a float difference,
             * which truncates sub-1.0 errors to 0 and hides mismatches. */
            if (fabsf(buf1[i*y + j] - buf2[i*y + j]) > 0.00000001f) {
                printf("idx :%d Host:%10.5f Device:%10.5f\n", i*y + j, buf1[i*y + j], buf2[i*y + j]);
                exit(1);
            }
    }
}

/* C = A + B, one element per thread.  `idx` enumerates every launched thread
 * uniquely from the 2-D grid/block coordinates.  NOTE: there is no bounds
 * guard, so the buffers must be at least as large as the total thread count
 * (main pads the device allocations to full grid coverage). */
__global__ void matrixSum(float *matrixA, float *matrixB, float *matrixC)
{
    int idx = (gridDim.y * blockIdx.x + blockIdx.y) * blockDim.x * blockDim.y * blockDim.z
            + blockDim.z * blockDim.y * threadIdx.x + blockDim.z * threadIdx.y + threadIdx.z;
    matrixC[idx] = matrixA[idx] + matrixB[idx];
}

/* C = A - B, one element per thread (same indexing contract as matrixSum). */
__global__ void matrixSub(float *matrixA, float *matrixB, float *matrixC)
{
    int idx = (gridDim.y * blockIdx.x + blockIdx.y) * blockDim.x * blockDim.y * blockDim.z
            + blockDim.z * blockDim.y * threadIdx.x + blockDim.z * threadIdx.y + threadIdx.z;
    matrixC[idx] = matrixA[idx] - matrixB[idx];
}

/* Print command-line usage and exit. */
void usage(int argc, char **argv)
{
    printf("%s usage:\n", argv[0]);
    printf(" : -x dimX\n");
    printf(" : -y dimy\n");
    printf(" : -i blockX\n");
    printf(" : -j blockY\n");
    printf("exiting\n");
    exit(1);
}

/* Compute A+B and A-B on CPU and GPU, time both, and verify the GPU
 * results against the CPU reference. */
int main(int argc, char **argv)
{
    int ret;
    int nx = X;
    int ny = Y;
    int blockDimX = BX;
    int blockDimY = BY;
    float *matrixA, *matrixB, *matrixC, *matrixD, *matrixC_d, *matrixD_d;
    struct timeval start, end;

    int c;
    while ((c = getopt(argc, argv, "x:y:i:j:")) != -1) {
        switch (c) {
        case 'x': nx = atoi(optarg); break;
        case 'y': ny = atoi(optarg); break;
        case 'i': blockDimX = atoi(optarg); break;
        case 'j': blockDimY = atoi(optarg); break;
        default:  usage(argc, argv);
        }
    }

    int nElems = nx * ny;
    int nBytes = nElems * sizeof(float);

    matrixA = (float *)malloc(nBytes);
    matrixB = (float *)malloc(nBytes);
    matrixC = (float *)malloc(nBytes);
    matrixD = (float *)malloc(nBytes);
    matrixC_d = (float *)malloc(nBytes);
    /* BUG FIX: matrixD_d was used as a cudaMemcpy destination but never
     * allocated — wild-pointer write. */
    matrixD_d = (float *)malloc(nBytes);

    dim3 b(blockDimX, blockDimY);
    dim3 g((nx + blockDimX - 1) / blockDimX, (ny + blockDimY - 1) / blockDimY);

    /* The kernels have no bounds guard and the ceil-div grid may launch more
     * threads than elements; pad the device buffers to full grid coverage so
     * the stray tail threads stay inside allocated memory. */
    size_t devBytes = (size_t)g.x * g.y * b.x * b.y * sizeof(float);

    float *d_matrixA, *d_matrixB, *d_matrixC, *d_matrixD;
    CHECK_RET(cudaMalloc(&d_matrixA, devBytes));
    CHECK_RET(cudaMalloc(&d_matrixB, devBytes));
    CHECK_RET(cudaMalloc(&d_matrixC, devBytes));
    /* BUG FIX: d_matrixD was written by matrixSub and freed, but never
     * cudaMalloc'd — illegal address at kernel run time. */
    CHECK_RET(cudaMalloc(&d_matrixD, devBytes));

    for (int i = 0; i < nElems; i++)
        matrixA[i] = matrixB[i] = 0.01f * i;

    /* CPU reference (timed). */
    gettimeofday(&start, NULL);
    for (int i = 0; i < nElems; i++) {
        matrixC[i] = matrixA[i] + matrixB[i];
        matrixD[i] = matrixA[i] - matrixB[i];
    }
    gettimeofday(&end, NULL);
    printf("%ld usec elapsed for caculation on CPU\n", USEC_ELAPSED(start, end));

    CHECK_RET(cudaMemcpy(d_matrixA, matrixA, nBytes, cudaMemcpyHostToDevice));
    CHECK_RET(cudaMemcpy(d_matrixB, matrixB, nBytes, cudaMemcpyHostToDevice));

    gettimeofday(&start, NULL);
    /* BUG FIX: the original launched <<<b,g>>>, i.e. block dims as the grid
     * and the (potentially huge) grid dims as the block — with the defaults
     * that requests 256x256 = 65536 threads per block and the launch fails.
     * The execution configuration is <<<grid, block>>>. */
    matrixSum<<<g, b>>>(d_matrixA, d_matrixB, d_matrixC);
    matrixSub<<<g, b>>>(d_matrixA, d_matrixB, d_matrixD);
    /* BUG FIX: replaced the stray unconditional "failure" printfs with real
     * checks — launch-configuration errors surface via cudaGetLastError(),
     * execution errors via cudaDeviceSynchronize(). */
    CHECK_RET(cudaGetLastError());
    CHECK_RET(cudaDeviceSynchronize());
    gettimeofday(&end, NULL);
    printf("%ld usec elapsed for caculation on GPU\n", USEC_ELAPSED(start, end));

    CHECK_RET(cudaMemcpy(matrixC_d, d_matrixC, nBytes, cudaMemcpyDeviceToHost));
    CHECK_RET(cudaMemcpy(matrixD_d, d_matrixD, nBytes, cudaMemcpyDeviceToHost));

    check_result(matrixC, matrixC_d, nx, ny);
    check_result(matrixD, matrixD_d, nx, ny);

    cudaFree(d_matrixA);
    cudaFree(d_matrixB);
    cudaFree(d_matrixC);
    cudaFree(d_matrixD);

#ifdef __DEBUG
    print_matrix(matrixA, nx, ny);
    print_matrix(matrixB, nx, ny);
    print_matrix(matrixC, nx, ny);
    print_matrix(matrixC_d, nx, ny);
#endif

    free(matrixA);
    free(matrixB);
    free(matrixC);
    free(matrixD);   /* BUG FIX: was leaked */
    free(matrixC_d);
    free(matrixD_d);
    return 0;
}
22,032
#include <stdio.h>
#include <cuda_runtime.h>

/* Query device 0 and dump its cudaDeviceProp fields to stdout.
 * Returns 0 on success, 1 when no usable CUDA device is present. */
int main(void){
    cudaDeviceProp prop;
    if(cudaGetDeviceProperties(&prop, 0) != cudaSuccess){
        /* BUG FIX: the original printed an error but fell through and dumped
         * every field of the UNINITIALIZED prop struct; bail out instead. */
        fprintf(stderr, "cudaGetDeviceProperties failed for device 0\n");
        return 1;
    }
    fprintf(stdout, "get prop success\n");
    fprintf(stdout, " name: %s\n", prop.name);
    fprintf(stdout, " totalGlobalMem: %lu\n", prop.totalGlobalMem);
    fprintf(stdout, " sharedMemPerBlock: %lu\n", prop.sharedMemPerBlock);
    fprintf(stdout, " regsPerBlock: %d\n", prop.regsPerBlock);
    fprintf(stdout, " warpSize: %d\n", prop.warpSize);
    fprintf(stdout, " memPitch: %lu\n", prop.memPitch);
    fprintf(stdout, " maxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
    fprintf(stdout, " maxThreadsDim:\n");
    fprintf(stdout, " maxThreadsDim[0]: %d\n", prop.maxThreadsDim[0]);
    fprintf(stdout, " maxThreadsDim[1]: %d\n", prop.maxThreadsDim[1]);
    fprintf(stdout, " maxThreadsDim[2]: %d\n", prop.maxThreadsDim[2]);
    fprintf(stdout, " maxThreadsGridSize:\n");
    fprintf(stdout, " maxGridSize[0]: %d\n", prop.maxGridSize[0]);
    fprintf(stdout, " maxGridSize[1]: %d\n", prop.maxGridSize[1]);
    fprintf(stdout, " maxGridSize[2]: %d\n", prop.maxGridSize[2]);
    fprintf(stdout, " clockRate: %d\n", prop.clockRate);
    fprintf(stdout, " totalConstMem: %lu\n", prop.totalConstMem);
    fprintf(stdout, " major.minor: %d.%d\n", prop.major, prop.minor);
    fprintf(stdout, " textureAlignment: %lu\n", prop.textureAlignment);
    fprintf(stdout, " texturePitchAlignment: %lu\n", prop.texturePitchAlignment);
    fprintf(stdout, " deviceOverlap: %d\n", prop.deviceOverlap);
    fprintf(stdout, " multiProcessorCount: %d\n", prop.multiProcessorCount);
    fprintf(stdout, " kernelExecTimeoutEnabled: %d\n", prop.kernelExecTimeoutEnabled);
    fprintf(stdout, " integrated: %d\n", prop.integrated);
    fprintf(stdout, " canMapHostMemory: %d\n", prop.canMapHostMemory);
    fprintf(stdout, " computeMode: %d\n", prop.computeMode);
    fprintf(stdout, " maxTexture1D: %d\n", prop.maxTexture1D);
    fprintf(stdout, " maxTexture1DLinear: %d\n", prop.maxTexture1DLinear);
    fprintf(stdout, " maxTexture2D\n");
    fprintf(stdout, " maxTexture2D[0]: %d\n", prop.maxTexture2D[0]);
    fprintf(stdout, " maxTexture2D[1]: %d\n", prop.maxTexture2D[1]);
    fprintf(stdout, " maxTexture2DLinear\n");
    fprintf(stdout, " maxTexture2DLinear[0]: %d\n", prop.maxTexture2DLinear[0]);
    fprintf(stdout, " maxTexture2DLinear[1]: %d\n", prop.maxTexture2DLinear[1]);
    fprintf(stdout, " maxTexture2DLinear[2]: %d\n", prop.maxTexture2DLinear[2]);
    fprintf(stdout, " maxTexture2DGather\n");
    fprintf(stdout, " maxTexture2DGather[0]: %d\n", prop.maxTexture2DGather[0]);
    fprintf(stdout, " maxTexture2DGather[1]: %d\n", prop.maxTexture2DGather[1]);
    fprintf(stdout, " maxTexture3D\n");
    fprintf(stdout, " maxTexture3D[0]: %d\n", prop.maxTexture3D[0]);
    fprintf(stdout, " maxTexture3D[1]: %d\n", prop.maxTexture3D[1]);
    fprintf(stdout, " maxTexture3D[2]: %d\n", prop.maxTexture3D[2]);
    fprintf(stdout, " maxSurface1D: %d\n", prop.maxSurface1D);
    fprintf(stdout, " maxSurface2D\n");
    fprintf(stdout, " maxSurface2D[0]: %d\n", prop.maxSurface2D[0]);
    fprintf(stdout, " maxSurface2D[1]: %d\n", prop.maxSurface2D[1]);
    fprintf(stdout, " maxSurface3D\n");
    /* NOTE(review): "maxSueface3D" label typos below are preserved from the
     * original output format. */
    fprintf(stdout, " maxSueface3D[0]: %d\n", prop.maxSurface3D[0]);
    fprintf(stdout, " maxSueface3D[1]: %d\n", prop.maxSurface3D[1]);
    fprintf(stdout, " maxSueface3D[2]: %d\n", prop.maxSurface3D[2]);
    fprintf(stdout, " maxSurface1DLayered\n");
    fprintf(stdout, " maxSurface1DLayered[0]: %d\n", prop.maxSurface1DLayered[0]);
    fprintf(stdout, " maxSurface1DLayered[1]: %d\n", prop.maxSurface1DLayered[1]);
    fprintf(stdout, " maxSurface2DLayered\n");
    fprintf(stdout, " maxSurface2DLayered[0]: %d\n", prop.maxSurface2DLayered[0]);
    fprintf(stdout, " maxSurface2DLayered[1]: %d\n", prop.maxSurface2DLayered[1]);
    fprintf(stdout, " maxSurface2DLayered[2]: %d\n", prop.maxSurface2DLayered[2]);
    fprintf(stdout, " maxSurfaceCubemap: %d\n", prop.maxSurfaceCubemap);
    fprintf(stdout, " maxSurfaceCubemapLayered\n");
    fprintf(stdout, " maxSurfaceCubemapLayered[0]: %d\n", prop.maxSurfaceCubemapLayered[0]);
    fprintf(stdout, " maxSurfaceCubemapLayered[1]: %d\n", prop.maxSurfaceCubemapLayered[1]);
    fprintf(stdout, " surfaceAlignment: %ld\n", prop.surfaceAlignment);
    fprintf(stdout, " concurrentKernels: %d\n", prop.concurrentKernels);
    fprintf(stdout, " ECCEnabled: %d\n", prop.ECCEnabled);
    fprintf(stdout, " pciBusID: %d\n", prop.pciBusID);
    fprintf(stdout, " pciDeviceID: %d\n", prop.pciDeviceID);
    fprintf(stdout, " pciDomainID: %d\n", prop.pciDomainID);
    fprintf(stdout, " tccDriver: %d\n", prop.tccDriver);
    fprintf(stdout, " asyncEngineCount: %d\n", prop.asyncEngineCount);
    fprintf(stdout, " unifiedAddressing: %d\n", prop.unifiedAddressing);
    fprintf(stdout, " memoryClockRate: %d\n", prop.memoryClockRate);
    fprintf(stdout, " l2CacheSize: %d\n", prop.l2CacheSize);
    fprintf(stdout, " maxThreadsPerMultiProcessor: %d\n", prop.maxThreadsPerMultiProcessor);
    return 0;
}
22,033
#include <stdio.h>

/* experiment with N */
/* how large can it be? */
#define N (2048*2048)
#define THREADS_PER_BLOCK 512

/* Element-wise vector add: c[i] = a[i] + b[i] for i in [0, N).
 * Launched 1-D; each thread handles one element.  N is a compile-time
 * macro, so the bounds guard needs no extra parameter. */
__global__ void vector_add(int *a, int *b, int *c)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    /* BUG FIX: guard the tail — with a ceil-div grid (or any N not a
     * multiple of the block size) threads past N-1 would read/write out
     * of bounds. */
    if (index < N) {
        c[index] = a[index] + b[index];
        /* debug trace of the final element (kept from the original) */
        if (index == N - 1) {
            printf("%d\n", index);
            printf("%d,%d,%d\n", a[index], b[index], c[index]);
        }
    }
}

/* Allocate host/device buffers, run the add on the GPU, print the first
 * and last results, and clean up. */
int main()
{
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);

    /* allocate space for device copies of a, b, c */
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    /* allocate space for host copies of a, b, c and setup input values */
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    for (int i = 0; i < N; i++) {
        a[i] = b[i] = i;
        c[i] = 0;
    }

    /* copy inputs to device */
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    /* BUG FIX: ceil-div grid so a partial last block is launched when N is
     * not a multiple of THREADS_PER_BLOCK (the old N/THREADS_PER_BLOCK
     * silently dropped the tail). */
    vector_add<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);

    /* BUG FIX: synchronize and check the kernel BEFORE reading results —
     * the original printed c[] first and only then looked for errors. */
    {
        cudaError_t cudaerr = cudaDeviceSynchronize();
        if (cudaerr != cudaSuccess)
            printf("kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr));
    }

    /* copy result back to host */
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    printf("c[0] = %d\n", c[0]);
    printf("c[%d] = %d\n", N - 1, c[N - 1]);

    /* clean up */
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
} /* end main */
22,034
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/* Integer ceiling division.  NOTE(review): this macro shadows math.h ceil()
 * for the rest of the translation unit. */
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)

/* Abort with `message` if the most recent CUDA call/launch left an error. */
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}

/* Machine-generated 8th-order finite-difference flux kernel (x and y
 * directions) on an L x M x N grid flattened as [k*M*N + j*N + i].
 * One thread per interior point; the guard keeps a 4-cell halo on every
 * face.  Coefficients 0.8/-0.2/0.038/-0.0035 are the standard 8th-order
 * central-difference weights.
 * NOTE(review): the guard compares i, j AND k against N-5 — presumably the
 * grid is cubic (L == M == N); confirm before using non-cubic extents. */
__global__ void hypterm_1 (double * __restrict__ flux_0, double * __restrict__ flux_1, double * __restrict__ flux_2, double * __restrict__ flux_3, double * __restrict__ flux_4, double * __restrict__ cons_1, double * __restrict__ cons_2, double * __restrict__ cons_3, double * __restrict__ cons_4, double * __restrict__ q_1, double * __restrict__ q_2, double * __restrict__ q_3, double * __restrict__ q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
/* interior points only: 4-cell stencil halo on every face */
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
/* x-direction derivatives (stride 1) */
flux_0[k*M*N+j*N+i] = -((0.8*(cons_1[k*M*N+j*N+i+1]-cons_1[k*M*N+j*N+i-1])-0.2*(cons_1[k*M*N+j*N+i+2]-cons_1[k*M*N+j*N+i-2])+0.038*(cons_1[k*M*N+j*N+i+3]-cons_1[k*M*N+j*N+i-3])-0.0035*(cons_1[k*M*N+j*N+i+4]-cons_1[k*M*N+j*N+i-4]))*dxinv0);
flux_1[k*M*N+j*N+i] = -((0.8*(cons_1[k*M*N+j*N+i+1]*q_1[k*M*N+j*N+i+1]-cons_1[k*M*N+j*N+i-1]*q_1[k*M*N+j*N+i-1]+(q_4[k*M*N+j*N+i+1]-q_4[k*M*N+j*N+i-1]))-0.2*(cons_1[k*M*N+j*N+i+2]*q_1[k*M*N+j*N+i+2]-cons_1[k*M*N+j*N+i-2]*q_1[k*M*N+j*N+i-2]+(q_4[k*M*N+j*N+i+2]-q_4[k*M*N+j*N+i-2]))+0.038*(cons_1[k*M*N+j*N+i+3]*q_1[k*M*N+j*N+i+3]-cons_1[k*M*N+j*N+i-3]*q_1[k*M*N+j*N+i-3]+(q_4[k*M*N+j*N+i+3]-q_4[k*M*N+j*N+i-3]))-0.0035*(cons_1[k*M*N+j*N+i+4]*q_1[k*M*N+j*N+i+4]-cons_1[k*M*N+j*N+i-4]*q_1[k*M*N+j*N+i-4]+(q_4[k*M*N+j*N+i+4]-q_4[k*M*N+j*N+i-4])))*dxinv0);
flux_2[k*M*N+j*N+i] = -((0.8*(cons_2[k*M*N+j*N+i+1]*q_1[k*M*N+j*N+i+1]-cons_2[k*M*N+j*N+i-1]*q_1[k*M*N+j*N+i-1])-0.2*(cons_2[k*M*N+j*N+i+2]*q_1[k*M*N+j*N+i+2]-cons_2[k*M*N+j*N+i-2]*q_1[k*M*N+j*N+i-2])+0.038*(cons_2[k*M*N+j*N+i+3]*q_1[k*M*N+j*N+i+3]-cons_2[k*M*N+j*N+i-3]*q_1[k*M*N+j*N+i-3])-0.0035*(cons_2[k*M*N+j*N+i+4]*q_1[k*M*N+j*N+i+4]-cons_2[k*M*N+j*N+i-4]*q_1[k*M*N+j*N+i-4]))*dxinv0);
flux_3[k*M*N+j*N+i] = -((0.8*(cons_3[k*M*N+j*N+i+1]*q_1[k*M*N+j*N+i+1]-cons_3[k*M*N+j*N+i-1]*q_1[k*M*N+j*N+i-1])-0.2*(cons_3[k*M*N+j*N+i+2]*q_1[k*M*N+j*N+i+2]-cons_3[k*M*N+j*N+i-2]*q_1[k*M*N+j*N+i-2])+0.038*(cons_3[k*M*N+j*N+i+3]*q_1[k*M*N+j*N+i+3]-cons_3[k*M*N+j*N+i-3]*q_1[k*M*N+j*N+i-3])-0.0035*(cons_3[k*M*N+j*N+i+4]*q_1[k*M*N+j*N+i+4]-cons_3[k*M*N+j*N+i-4]*q_1[k*M*N+j*N+i-4]))*dxinv0);
/* y-direction derivatives (stride N) accumulated on top */
flux_0[k*M*N+j*N+i] -= (0.8*(cons_2[k*M*N+(j+1)*N+i]-cons_2[k*M*N+(j-1)*N+i])-0.2*(cons_2[k*M*N+(j+2)*N+i]-cons_2[k*M*N+(j-2)*N+i])+0.038*(cons_2[k*M*N+(j+3)*N+i]-cons_2[k*M*N+(j-3)*N+i])-0.0035*(cons_2[k*M*N+(j+4)*N+i]-cons_2[k*M*N+(j-4)*N+i]))*dxinv1;
flux_1[k*M*N+j*N+i] -= (0.8*(cons_1[k*M*N+(j+1)*N+i]*q_2[k*M*N+(j+1)*N+i]-cons_1[k*M*N+(j-1)*N+i]*q_2[k*M*N+(j-1)*N+i])-0.2*(cons_1[k*M*N+(j+2)*N+i]*q_2[k*M*N+(j+2)*N+i]-cons_1[k*M*N+(j-2)*N+i]*q_2[k*M*N+(j-2)*N+i])+0.038*(cons_1[k*M*N+(j+3)*N+i]*q_2[k*M*N+(j+3)*N+i]-cons_1[k*M*N+(j-3)*N+i]*q_2[k*M*N+(j-3)*N+i])-0.0035*(cons_1[k*M*N+(j+4)*N+i]*q_2[k*M*N+(j+4)*N+i]-cons_1[k*M*N+(j-4)*N+i]*q_2[k*M*N+(j-4)*N+i]))*dxinv1;
flux_2[k*M*N+j*N+i] -= (0.8*(cons_2[k*M*N+(j+1)*N+i]*q_2[k*M*N+(j+1)*N+i]-cons_2[k*M*N+(j-1)*N+i]*q_2[k*M*N+(j-1)*N+i]+(q_4[k*M*N+(j+1)*N+i]-q_4[k*M*N+(j-1)*N+i]))-0.2*(cons_2[k*M*N+(j+2)*N+i]*q_2[k*M*N+(j+2)*N+i]-cons_2[k*M*N+(j-2)*N+i]*q_2[k*M*N+(j-2)*N+i]+(q_4[k*M*N+(j+2)*N+i]-q_4[k*M*N+(j-2)*N+i]))+0.038*(cons_2[k*M*N+(j+3)*N+i]*q_2[k*M*N+(j+3)*N+i]-cons_2[k*M*N+(j-3)*N+i]*q_2[k*M*N+(j-3)*N+i]+(q_4[k*M*N+(j+3)*N+i]-q_4[k*M*N+(j-3)*N+i]))-0.0035*(cons_2[k*M*N+(j+4)*N+i]*q_2[k*M*N+(j+4)*N+i]-cons_2[k*M*N+(j-4)*N+i]*q_2[k*M*N+(j-4)*N+i]+(q_4[k*M*N+(j+4)*N+i]-q_4[k*M*N+(j-4)*N+i])))*dxinv1;
flux_3[k*M*N+j*N+i] -= (0.8*(cons_3[k*M*N+(j+1)*N+i]*q_2[k*M*N+(j+1)*N+i]-cons_3[k*M*N+(j-1)*N+i]*q_2[k*M*N+(j-1)*N+i])-0.2*(cons_3[k*M*N+(j+2)*N+i]*q_2[k*M*N+(j+2)*N+i]-cons_3[k*M*N+(j-2)*N+i]*q_2[k*M*N+(j-2)*N+i])+0.038*(cons_3[k*M*N+(j+3)*N+i]*q_2[k*M*N+(j+3)*N+i]-cons_3[k*M*N+(j-3)*N+i]*q_2[k*M*N+(j-3)*N+i])-0.0035*(cons_3[k*M*N+(j+4)*N+i]*q_2[k*M*N+(j+4)*N+i]-cons_3[k*M*N+(j-4)*N+i]*q_2[k*M*N+(j-4)*N+i]))*dxinv1;
}
}

/* z-direction flux accumulation for flux_0..flux_3, manually unrolled 4x
 * along k: each thread owns planes k, k+1, k+2, k+3 (grid is launched with
 * L/(4*blockDim.z) blocks in z by host_code).
 * NOTE(review): the interior guard only tests the BASE k against N-5, so
 * the unrolled k+1..k+3 planes can reach past the nominal interior — the
 * generator presumably guarantees divisible extents; verify for general N. */
__global__ void hypterm_2 (double * __restrict__ flux_0, double * __restrict__ flux_1, double * __restrict__ flux_2, double * __restrict__ flux_3, double * __restrict__ flux_4, double * __restrict__ cons_1, double * __restrict__ cons_2, double * __restrict__ cons_3, double * __restrict__ cons_4, double * __restrict__ q_1, double * __restrict__ q_2, double * __restrict__ q_3, double * __restrict__ q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(4*blockdim_k);
int k = max (k0, 0) + 4*(int)(threadIdx.z);
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
/* flux_0: z-derivative of cons_3 for the four owned planes */
flux_0[k*M*N+j*N+i] -= (0.8*(cons_3[(k+1)*M*N+j*N+i]-cons_3[(k-1)*M*N+j*N+i])-0.2*(cons_3[(k+2)*M*N+j*N+i]-cons_3[(k-2)*M*N+j*N+i])+0.038*(cons_3[(k+3)*M*N+j*N+i]-cons_3[(k-3)*M*N+j*N+i])-0.0035*(cons_3[(k+4)*M*N+j*N+i]-cons_3[(k-4)*M*N+j*N+i]))*dxinv2;
flux_0[(k+1)*M*N+j*N+i] -= (0.8*(cons_3[((k+1)+1)*M*N+j*N+i]-cons_3[((k+1)-1)*M*N+j*N+i])-0.2*(cons_3[((k+1)+2)*M*N+j*N+i]-cons_3[((k+1)-2)*M*N+j*N+i])+0.038*(cons_3[((k+1)+3)*M*N+j*N+i]-cons_3[((k+1)-3)*M*N+j*N+i])-0.0035*(cons_3[((k+1)+4)*M*N+j*N+i]-cons_3[((k+1)-4)*M*N+j*N+i]))*dxinv2;
flux_0[(k+2)*M*N+j*N+i] -= (0.8*(cons_3[((k+2)+1)*M*N+j*N+i]-cons_3[((k+2)-1)*M*N+j*N+i])-0.2*(cons_3[((k+2)+2)*M*N+j*N+i]-cons_3[((k+2)-2)*M*N+j*N+i])+0.038*(cons_3[((k+2)+3)*M*N+j*N+i]-cons_3[((k+2)-3)*M*N+j*N+i])-0.0035*(cons_3[((k+2)+4)*M*N+j*N+i]-cons_3[((k+2)-4)*M*N+j*N+i]))*dxinv2;
flux_0[(k+3)*M*N+j*N+i] -= (0.8*(cons_3[((k+3)+1)*M*N+j*N+i]-cons_3[((k+3)-1)*M*N+j*N+i])-0.2*(cons_3[((k+3)+2)*M*N+j*N+i]-cons_3[((k+3)-2)*M*N+j*N+i])+0.038*(cons_3[((k+3)+3)*M*N+j*N+i]-cons_3[((k+3)-3)*M*N+j*N+i])-0.0035*(cons_3[((k+3)+4)*M*N+j*N+i]-cons_3[((k+3)-4)*M*N+j*N+i]))*dxinv2;
/* flux_1: z-derivative of cons_1 * q_3 */
flux_1[k*M*N+j*N+i] -= (0.8*(cons_1[(k+1)*M*N+j*N+i]*q_3[(k+1)*M*N+j*N+i]-cons_1[(k-1)*M*N+j*N+i]*q_3[(k-1)*M*N+j*N+i])-0.2*(cons_1[(k+2)*M*N+j*N+i]*q_3[(k+2)*M*N+j*N+i]-cons_1[(k-2)*M*N+j*N+i]*q_3[(k-2)*M*N+j*N+i])+0.038*(cons_1[(k+3)*M*N+j*N+i]*q_3[(k+3)*M*N+j*N+i]-cons_1[(k-3)*M*N+j*N+i]*q_3[(k-3)*M*N+j*N+i])-0.0035*(cons_1[(k+4)*M*N+j*N+i]*q_3[(k+4)*M*N+j*N+i]-cons_1[(k-4)*M*N+j*N+i]*q_3[(k-4)*M*N+j*N+i]))*dxinv2;
flux_1[(k+1)*M*N+j*N+i] -= (0.8*(cons_1[((k+1)+1)*M*N+j*N+i]*q_3[((k+1)+1)*M*N+j*N+i]-cons_1[((k+1)-1)*M*N+j*N+i]*q_3[((k+1)-1)*M*N+j*N+i])-0.2*(cons_1[((k+1)+2)*M*N+j*N+i]*q_3[((k+1)+2)*M*N+j*N+i]-cons_1[((k+1)-2)*M*N+j*N+i]*q_3[((k+1)-2)*M*N+j*N+i])+0.038*(cons_1[((k+1)+3)*M*N+j*N+i]*q_3[((k+1)+3)*M*N+j*N+i]-cons_1[((k+1)-3)*M*N+j*N+i]*q_3[((k+1)-3)*M*N+j*N+i])-0.0035*(cons_1[((k+1)+4)*M*N+j*N+i]*q_3[((k+1)+4)*M*N+j*N+i]-cons_1[((k+1)-4)*M*N+j*N+i]*q_3[((k+1)-4)*M*N+j*N+i]))*dxinv2;
flux_1[(k+2)*M*N+j*N+i] -= (0.8*(cons_1[((k+2)+1)*M*N+j*N+i]*q_3[((k+2)+1)*M*N+j*N+i]-cons_1[((k+2)-1)*M*N+j*N+i]*q_3[((k+2)-1)*M*N+j*N+i])-0.2*(cons_1[((k+2)+2)*M*N+j*N+i]*q_3[((k+2)+2)*M*N+j*N+i]-cons_1[((k+2)-2)*M*N+j*N+i]*q_3[((k+2)-2)*M*N+j*N+i])+0.038*(cons_1[((k+2)+3)*M*N+j*N+i]*q_3[((k+2)+3)*M*N+j*N+i]-cons_1[((k+2)-3)*M*N+j*N+i]*q_3[((k+2)-3)*M*N+j*N+i])-0.0035*(cons_1[((k+2)+4)*M*N+j*N+i]*q_3[((k+2)+4)*M*N+j*N+i]-cons_1[((k+2)-4)*M*N+j*N+i]*q_3[((k+2)-4)*M*N+j*N+i]))*dxinv2;
flux_1[(k+3)*M*N+j*N+i] -= (0.8*(cons_1[((k+3)+1)*M*N+j*N+i]*q_3[((k+3)+1)*M*N+j*N+i]-cons_1[((k+3)-1)*M*N+j*N+i]*q_3[((k+3)-1)*M*N+j*N+i])-0.2*(cons_1[((k+3)+2)*M*N+j*N+i]*q_3[((k+3)+2)*M*N+j*N+i]-cons_1[((k+3)-2)*M*N+j*N+i]*q_3[((k+3)-2)*M*N+j*N+i])+0.038*(cons_1[((k+3)+3)*M*N+j*N+i]*q_3[((k+3)+3)*M*N+j*N+i]-cons_1[((k+3)-3)*M*N+j*N+i]*q_3[((k+3)-3)*M*N+j*N+i])-0.0035*(cons_1[((k+3)+4)*M*N+j*N+i]*q_3[((k+3)+4)*M*N+j*N+i]-cons_1[((k+3)-4)*M*N+j*N+i]*q_3[((k+3)-4)*M*N+j*N+i]))*dxinv2;
/* flux_2: z-derivative of cons_2 * q_3 */
flux_2[k*M*N+j*N+i] -= (0.8*(cons_2[(k+1)*M*N+j*N+i]*q_3[(k+1)*M*N+j*N+i]-cons_2[(k-1)*M*N+j*N+i]*q_3[(k-1)*M*N+j*N+i])-0.2*(cons_2[(k+2)*M*N+j*N+i]*q_3[(k+2)*M*N+j*N+i]-cons_2[(k-2)*M*N+j*N+i]*q_3[(k-2)*M*N+j*N+i])+0.038*(cons_2[(k+3)*M*N+j*N+i]*q_3[(k+3)*M*N+j*N+i]-cons_2[(k-3)*M*N+j*N+i]*q_3[(k-3)*M*N+j*N+i])-0.0035*(cons_2[(k+4)*M*N+j*N+i]*q_3[(k+4)*M*N+j*N+i]-cons_2[(k-4)*M*N+j*N+i]*q_3[(k-4)*M*N+j*N+i]))*dxinv2;
flux_2[(k+1)*M*N+j*N+i] -= (0.8*(cons_2[((k+1)+1)*M*N+j*N+i]*q_3[((k+1)+1)*M*N+j*N+i]-cons_2[((k+1)-1)*M*N+j*N+i]*q_3[((k+1)-1)*M*N+j*N+i])-0.2*(cons_2[((k+1)+2)*M*N+j*N+i]*q_3[((k+1)+2)*M*N+j*N+i]-cons_2[((k+1)-2)*M*N+j*N+i]*q_3[((k+1)-2)*M*N+j*N+i])+0.038*(cons_2[((k+1)+3)*M*N+j*N+i]*q_3[((k+1)+3)*M*N+j*N+i]-cons_2[((k+1)-3)*M*N+j*N+i]*q_3[((k+1)-3)*M*N+j*N+i])-0.0035*(cons_2[((k+1)+4)*M*N+j*N+i]*q_3[((k+1)+4)*M*N+j*N+i]-cons_2[((k+1)-4)*M*N+j*N+i]*q_3[((k+1)-4)*M*N+j*N+i]))*dxinv2;
flux_2[(k+2)*M*N+j*N+i] -= (0.8*(cons_2[((k+2)+1)*M*N+j*N+i]*q_3[((k+2)+1)*M*N+j*N+i]-cons_2[((k+2)-1)*M*N+j*N+i]*q_3[((k+2)-1)*M*N+j*N+i])-0.2*(cons_2[((k+2)+2)*M*N+j*N+i]*q_3[((k+2)+2)*M*N+j*N+i]-cons_2[((k+2)-2)*M*N+j*N+i]*q_3[((k+2)-2)*M*N+j*N+i])+0.038*(cons_2[((k+2)+3)*M*N+j*N+i]*q_3[((k+2)+3)*M*N+j*N+i]-cons_2[((k+2)-3)*M*N+j*N+i]*q_3[((k+2)-3)*M*N+j*N+i])-0.0035*(cons_2[((k+2)+4)*M*N+j*N+i]*q_3[((k+2)+4)*M*N+j*N+i]-cons_2[((k+2)-4)*M*N+j*N+i]*q_3[((k+2)-4)*M*N+j*N+i]))*dxinv2;
flux_2[(k+3)*M*N+j*N+i] -= (0.8*(cons_2[((k+3)+1)*M*N+j*N+i]*q_3[((k+3)+1)*M*N+j*N+i]-cons_2[((k+3)-1)*M*N+j*N+i]*q_3[((k+3)-1)*M*N+j*N+i])-0.2*(cons_2[((k+3)+2)*M*N+j*N+i]*q_3[((k+3)+2)*M*N+j*N+i]-cons_2[((k+3)-2)*M*N+j*N+i]*q_3[((k+3)-2)*M*N+j*N+i])+0.038*(cons_2[((k+3)+3)*M*N+j*N+i]*q_3[((k+3)+3)*M*N+j*N+i]-cons_2[((k+3)-3)*M*N+j*N+i]*q_3[((k+3)-3)*M*N+j*N+i])-0.0035*(cons_2[((k+3)+4)*M*N+j*N+i]*q_3[((k+3)+4)*M*N+j*N+i]-cons_2[((k+3)-4)*M*N+j*N+i]*q_3[((k+3)-4)*M*N+j*N+i]))*dxinv2;
/* flux_3: z-derivative of cons_3 * q_3 plus the q_4 (pressure) term */
flux_3[k*M*N+j*N+i] -= (0.8*(cons_3[(k+1)*M*N+j*N+i]*q_3[(k+1)*M*N+j*N+i]-cons_3[(k-1)*M*N+j*N+i]*q_3[(k-1)*M*N+j*N+i]+(q_4[(k+1)*M*N+j*N+i]-q_4[(k-1)*M*N+j*N+i]))-0.2*(cons_3[(k+2)*M*N+j*N+i]*q_3[(k+2)*M*N+j*N+i]-cons_3[(k-2)*M*N+j*N+i]*q_3[(k-2)*M*N+j*N+i]+(q_4[(k+2)*M*N+j*N+i]-q_4[(k-2)*M*N+j*N+i]))+0.038*(cons_3[(k+3)*M*N+j*N+i]*q_3[(k+3)*M*N+j*N+i]-cons_3[(k-3)*M*N+j*N+i]*q_3[(k-3)*M*N+j*N+i]+(q_4[(k+3)*M*N+j*N+i]-q_4[(k-3)*M*N+j*N+i]))-0.0035*(cons_3[(k+4)*M*N+j*N+i]*q_3[(k+4)*M*N+j*N+i]-cons_3[(k-4)*M*N+j*N+i]*q_3[(k-4)*M*N+j*N+i]+(q_4[(k+4)*M*N+j*N+i]-q_4[(k-4)*M*N+j*N+i])))*dxinv2;
flux_3[(k+1)*M*N+j*N+i] -= (0.8*(cons_3[((k+1)+1)*M*N+j*N+i]*q_3[((k+1)+1)*M*N+j*N+i]-cons_3[((k+1)-1)*M*N+j*N+i]*q_3[((k+1)-1)*M*N+j*N+i]+(q_4[((k+1)+1)*M*N+j*N+i]-q_4[((k+1)-1)*M*N+j*N+i]))-0.2*(cons_3[((k+1)+2)*M*N+j*N+i]*q_3[((k+1)+2)*M*N+j*N+i]-cons_3[((k+1)-2)*M*N+j*N+i]*q_3[((k+1)-2)*M*N+j*N+i]+(q_4[((k+1)+2)*M*N+j*N+i]-q_4[((k+1)-2)*M*N+j*N+i]))+0.038*(cons_3[((k+1)+3)*M*N+j*N+i]*q_3[((k+1)+3)*M*N+j*N+i]-cons_3[((k+1)-3)*M*N+j*N+i]*q_3[((k+1)-3)*M*N+j*N+i]+(q_4[((k+1)+3)*M*N+j*N+i]-q_4[((k+1)-3)*M*N+j*N+i]))-0.0035*(cons_3[((k+1)+4)*M*N+j*N+i]*q_3[((k+1)+4)*M*N+j*N+i]-cons_3[((k+1)-4)*M*N+j*N+i]*q_3[((k+1)-4)*M*N+j*N+i]+(q_4[((k+1)+4)*M*N+j*N+i]-q_4[((k+1)-4)*M*N+j*N+i])))*dxinv2;
flux_3[(k+2)*M*N+j*N+i] -= (0.8*(cons_3[((k+2)+1)*M*N+j*N+i]*q_3[((k+2)+1)*M*N+j*N+i]-cons_3[((k+2)-1)*M*N+j*N+i]*q_3[((k+2)-1)*M*N+j*N+i]+(q_4[((k+2)+1)*M*N+j*N+i]-q_4[((k+2)-1)*M*N+j*N+i]))-0.2*(cons_3[((k+2)+2)*M*N+j*N+i]*q_3[((k+2)+2)*M*N+j*N+i]-cons_3[((k+2)-2)*M*N+j*N+i]*q_3[((k+2)-2)*M*N+j*N+i]+(q_4[((k+2)+2)*M*N+j*N+i]-q_4[((k+2)-2)*M*N+j*N+i]))+0.038*(cons_3[((k+2)+3)*M*N+j*N+i]*q_3[((k+2)+3)*M*N+j*N+i]-cons_3[((k+2)-3)*M*N+j*N+i]*q_3[((k+2)-3)*M*N+j*N+i]+(q_4[((k+2)+3)*M*N+j*N+i]-q_4[((k+2)-3)*M*N+j*N+i]))-0.0035*(cons_3[((k+2)+4)*M*N+j*N+i]*q_3[((k+2)+4)*M*N+j*N+i]-cons_3[((k+2)-4)*M*N+j*N+i]*q_3[((k+2)-4)*M*N+j*N+i]+(q_4[((k+2)+4)*M*N+j*N+i]-q_4[((k+2)-4)*M*N+j*N+i])))*dxinv2;
flux_3[(k+3)*M*N+j*N+i] -= (0.8*(cons_3[((k+3)+1)*M*N+j*N+i]*q_3[((k+3)+1)*M*N+j*N+i]-cons_3[((k+3)-1)*M*N+j*N+i]*q_3[((k+3)-1)*M*N+j*N+i]+(q_4[((k+3)+1)*M*N+j*N+i]-q_4[((k+3)-1)*M*N+j*N+i]))-0.2*(cons_3[((k+3)+2)*M*N+j*N+i]*q_3[((k+3)+2)*M*N+j*N+i]-cons_3[((k+3)-2)*M*N+j*N+i]*q_3[((k+3)-2)*M*N+j*N+i]+(q_4[((k+3)+2)*M*N+j*N+i]-q_4[((k+3)-2)*M*N+j*N+i]))+0.038*(cons_3[((k+3)+3)*M*N+j*N+i]*q_3[((k+3)+3)*M*N+j*N+i]-cons_3[((k+3)-3)*M*N+j*N+i]*q_3[((k+3)-3)*M*N+j*N+i]+(q_4[((k+3)+3)*M*N+j*N+i]-q_4[((k+3)-3)*M*N+j*N+i]))-0.0035*(cons_3[((k+3)+4)*M*N+j*N+i]*q_3[((k+3)+4)*M*N+j*N+i]-cons_3[((k+3)-4)*M*N+j*N+i]*q_3[((k+3)-4)*M*N+j*N+i]+(q_4[((k+3)+4)*M*N+j*N+i]-q_4[((k+3)-4)*M*N+j*N+i])))*dxinv2;
}
}

/* Energy-flux (flux_4) kernel: all three directional derivatives of
 * (cons_4 + q_4) * velocity, with common subexpressions factored into _t_*
 * scalars by the generator.  Unrolled 2x along k (planes k and k+1).
 * NOTE(review): the flat arrays are reinterpreted as fixed [308][308]
 * inner-dimension 3-D arrays — this hard-codes M == N == 308 regardless of
 * the M/N parameters; confirm host_code always passes 308. */
__global__ void hypterm_3 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + 2*(int)(threadIdx.z);
/* reinterpret the flat buffers as [308][308]-strided 3-D arrays */
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
/* generator-produced temporaries; _t_0_.._t_5_: x-dir plane k,
 * _t_6_.._t_11_: x-dir plane k+1, _t_12_.._t_17_: y-dir plane k,
 * _t_18_.._t_23_: y-dir plane k+1, _t_24_.._t_35_: interleaved z-dir */
double _t_5_;
double _t_1_;
double _t_2_;
double _t_3_;
double _t_4_;
double _t_0_;
double flux_4kc0jc0ic0 = 0;
double _t_11_;
double _t_7_;
double _t_10_;
double _t_8_;
double _t_9_;
double _t_6_;
double flux_4kp1jc0ic0 = 0;
double _t_17_;
double _t_13_;
double _t_14_;
double _t_15_;
double _t_16_;
double _t_12_;
double _t_23_;
double _t_19_;
double _t_20_;
double _t_21_;
double _t_22_;
double _t_18_;
double _t_29_;
double _t_34_;
double _t_25_;
double _t_27_;
double _t_31_;
double _t_32_;
double _t_26_;
double _t_33_;
double _t_28_;
double _t_35_;
double _t_24_;
double _t_30_;
/* x-direction contribution, plane k */
_t_5_ = -cons_4[k][j][i-4] * q_1[k][j][i-4];
_t_5_ -= q_4[k][j][i-4] * q_1[k][j][i-4];
_t_5_ += cons_4[k][j][i+4] * q_1[k][j][i+4];
_t_5_ += q_4[k][j][i+4] * q_1[k][j][i+4];
_t_1_ = -0.0035 * _t_5_;
_t_2_ = -cons_4[k][j][i-1] * q_1[k][j][i-1];
_t_2_ -= q_4[k][j][i-1] * q_1[k][j][i-1];
_t_2_ += cons_4[k][j][i+1] * q_1[k][j][i+1];
_t_2_ += q_4[k][j][i+1] * q_1[k][j][i+1];
_t_1_ += 0.8 * _t_2_;
_t_3_ = -cons_4[k][j][i-2] * q_1[k][j][i-2];
_t_3_ -= q_4[k][j][i-2] * q_1[k][j][i-2];
_t_3_ += cons_4[k][j][i+2] * q_1[k][j][i+2];
_t_3_ += q_4[k][j][i+2] * q_1[k][j][i+2];
_t_1_ -= 0.2 * _t_3_;
_t_4_ = -cons_4[k][j][i-3] * q_1[k][j][i-3];
_t_4_ -= q_4[k][j][i-3] * q_1[k][j][i-3];
_t_4_ += cons_4[k][j][i+3] * q_1[k][j][i+3];
_t_4_ += q_4[k][j][i+3] * q_1[k][j][i+3];
_t_1_ += 0.038 * _t_4_;
_t_0_ = _t_1_ * dxinv0;
flux_4kc0jc0ic0 -= _t_0_;
/* x-direction contribution, plane k+1 */
_t_11_ = -cons_4[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_11_ -= q_4[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_11_ += cons_4[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_11_ += q_4[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_7_ = -0.0035 * _t_11_;
_t_10_ = -cons_4[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_10_ -= q_4[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_10_ += cons_4[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_10_ += q_4[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_7_ += 0.038 * _t_10_;
_t_8_ = -cons_4[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_8_ -= q_4[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_8_ += cons_4[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_8_ += q_4[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_7_ += 0.8 * _t_8_;
_t_9_ = -cons_4[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_9_ -= q_4[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_9_ += cons_4[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_9_ += q_4[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_7_ -= 0.2 * _t_9_;
_t_6_ = _t_7_ * dxinv0;
flux_4kp1jc0ic0 -= _t_6_;
/* y-direction contribution, plane k */
_t_17_ = -cons_4[k][j-4][i] * q_2[k][j-4][i];
_t_17_ -= q_4[k][j-4][i] * q_2[k][j-4][i];
_t_17_ += cons_4[k][j+4][i] * q_2[k][j+4][i];
_t_17_ += q_4[k][j+4][i] * q_2[k][j+4][i];
_t_13_ = -0.0035 * _t_17_;
_t_14_ = -cons_4[k][j-1][i] * q_2[k][j-1][i];
_t_14_ -= q_4[k][j-1][i] * q_2[k][j-1][i];
_t_14_ += cons_4[k][j+1][i] * q_2[k][j+1][i];
_t_14_ += q_4[k][j+1][i] * q_2[k][j+1][i];
_t_13_ += 0.8 * _t_14_;
_t_15_ = -cons_4[k][j-2][i] * q_2[k][j-2][i];
_t_15_ -= q_4[k][j-2][i] * q_2[k][j-2][i];
_t_15_ += cons_4[k][j+2][i] * q_2[k][j+2][i];
_t_15_ += q_4[k][j+2][i] * q_2[k][j+2][i];
_t_13_ -= 0.2 * _t_15_;
_t_16_ = -cons_4[k][j-3][i] * q_2[k][j-3][i];
_t_16_ -= q_4[k][j-3][i] * q_2[k][j-3][i];
_t_16_ += cons_4[k][j+3][i] * q_2[k][j+3][i];
_t_16_ += q_4[k][j+3][i] * q_2[k][j+3][i];
_t_13_ += 0.038 * _t_16_;
_t_12_ = _t_13_ * dxinv1;
flux_4kc0jc0ic0 -= _t_12_;
/* y-direction contribution, plane k+1 */
_t_23_ = -cons_4[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_23_ -= q_4[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_23_ += cons_4[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_23_ += q_4[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_19_ = -0.0035 * _t_23_;
_t_20_ = -cons_4[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_20_ -= q_4[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_20_ += cons_4[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_20_ += q_4[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_19_ += 0.8 * _t_20_;
_t_21_ = -cons_4[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_21_ -= q_4[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_21_ += cons_4[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_21_ += q_4[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_19_ -= 0.2 * _t_21_;
_t_22_ = -cons_4[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_22_ -= q_4[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_22_ += cons_4[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_22_ += q_4[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_19_ += 0.038 * _t_22_;
_t_18_ = _t_19_ * dxinv1;
flux_4kp1jc0ic0 -= _t_18_;
/* z-direction contribution for both planes — the generator interleaves the
 * two stencils to reuse the shared q_3 loads (plane k+1's stencil spans
 * k-3..k+5) */
_t_29_ = -cons_4[k-4][j][i] * q_3[k-4][j][i];
_t_29_ -= q_4[k-4][j][i] * q_3[k-4][j][i];
_t_29_ += cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_34_ = cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_29_ += q_4[k+4][j][i] * q_3[k+4][j][i];
_t_34_ += q_4[k+4][j][i] * q_3[k+4][j][i];
_t_25_ = -0.0035 * _t_29_;
_t_34_ -= cons_4[k-2][j][i] * q_3[k-2][j][i];
_t_27_ = -cons_4[k-2][j][i] * q_3[k-2][j][i];
_t_27_ -= q_4[k-2][j][i] * q_3[k-2][j][i];
_t_34_ -= q_4[k-2][j][i] * q_3[k-2][j][i];
_t_31_ = 0.038 * _t_34_;
_t_27_ += cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_32_ = cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_27_ += q_4[k+2][j][i] * q_3[k+2][j][i];
_t_25_ -= 0.2 * _t_27_;
_t_32_ += q_4[k+2][j][i] * q_3[k+2][j][i];
_t_32_ -= cons_4[k][j][i] * q_3[k][j][i];
_t_32_ -= q_4[k][j][i] * q_3[k][j][i];
_t_31_ += 0.8 * _t_32_;
_t_26_ = cons_4[k+1][j][i] * q_3[k+1][j][i];
_t_26_ += q_4[k+1][j][i] * q_3[k+1][j][i];
_t_26_ -= cons_4[k-1][j][i] * q_3[k-1][j][i];
_t_33_ = -cons_4[k-1][j][i] * q_3[k-1][j][i];
_t_26_ -= q_4[k-1][j][i] * q_3[k-1][j][i];
_t_25_ += 0.8 * _t_26_;
_t_33_ -= q_4[k-1][j][i] * q_3[k-1][j][i];
_t_33_ += cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_28_ = cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_28_ += q_4[k+3][j][i] * q_3[k+3][j][i];
_t_33_ += q_4[k+3][j][i] * q_3[k+3][j][i];
_t_31_ -= 0.2 * _t_33_;
_t_28_ -= cons_4[k-3][j][i] * q_3[k-3][j][i];
_t_35_ = -cons_4[k-3][j][i] * q_3[k-3][j][i];
_t_28_ -= q_4[k-3][j][i] * q_3[k-3][j][i];
_t_25_ += 0.038 * _t_28_;
_t_35_ -= q_4[k-3][j][i] * q_3[k-3][j][i];
_t_35_ += cons_4[k+5][j][i] * q_3[k+5][j][i];
_t_35_ += q_4[k+5][j][i] * q_3[k+5][j][i];
_t_31_ -= 0.0035 * _t_35_;
_t_24_ = _t_25_ * dxinv2;
flux_4kc0jc0ic0 -= _t_24_;
flux_4[k][j][i] = flux_4kc0jc0ic0;
_t_30_ = _t_31_ * dxinv2;
flux_4kp1jc0ic0 -= _t_30_;
flux_4[k+1][j][i] = flux_4kp1jc0ic0;
}
}

/* Host driver: copies all thirteen L*M*N fields to the device, runs the
 * three hypterm kernels (hypterm_2 covers 4 k-planes per thread, hypterm_3
 * covers 2, hence the divided z grid extents), and copies the five flux
 * fields back.
 * NOTE(review): device buffers are never cudaFree'd and only the cudaMalloc
 * calls are error-checked (check_error after launch is absent) — presumably
 * acceptable for a one-shot benchmark, but worth fixing for reuse. */
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
cudaMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_1;
cudaMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_2;
cudaMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_3;
cudaMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_4;
cudaMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_1;
cudaMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_2;
cudaMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_3;
cudaMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_4;
cudaMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_1;
cudaMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_2;
cudaMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_3;
cudaMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_4;
cudaMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hypterm_1 <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
/* hypterm_2 handles 4 k-planes per thread -> z grid divided by 4 */
dim3 gridconfig_2 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 4*blockconfig.z));
hypterm_2 <<<gridconfig_2, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
/* hypterm_3 handles 2 k-planes per thread -> z grid divided by 2 */
dim3 gridconfig_3 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 2*blockconfig.z));
hypterm_3 <<<gridconfig_3, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
}
22,035
#include <stdio.h>

/*
 * Enumerates every CUDA-capable device visible to the runtime and prints
 * its general, memory, and multiprocessor properties to stdout.
 *
 * Returns 0 on success, 1 if any CUDA runtime query fails.
 */
int main() {
    cudaDeviceProp prop;
    int count = 0;

    // FIX: check the runtime return codes — the original ignored them, so a
    // missing driver/device would print garbage from an uninitialized struct.
    cudaError_t err = cudaGetDeviceCount(&count);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    for (int i = 0; i < count; i++) {
        err = cudaGetDeviceProperties(&prop, i);
        if (err != cudaSuccess) {
            fprintf(stderr, "cudaGetDeviceProperties(%d) failed: %s\n",
                    i, cudaGetErrorString(err));
            return 1;
        }

        printf(" ---General Information for device%d ---\n", i);
        printf("Name: %s\n", prop.name);
        printf("Computecapability: %d.%d\n", prop.major, prop.minor);
        printf("Clockrate: %d\n", prop.clockRate);

        printf(" ---Memory Information for device%d ---\n", i);
        // FIX: these fields are size_t — use %zu instead of %lu/%ld, which is
        // wrong on platforms where size_t is not long (e.g. 64-bit Windows).
        printf("Total global mem: %zu\n", prop.totalGlobalMem);
        printf("Total constant Mem: %zu\n", prop.totalConstMem);
        printf("Max mem pitch: %zu\n", prop.memPitch);
        printf("Texture Alignment: %zu\n", prop.textureAlignment);

        printf(" ---MP Information for device%d ---\n", i);
        printf("Multiprocessorcount: %d\n", prop.multiProcessorCount);
        printf("Sharedmem per mp: %zu\n", prop.sharedMemPerBlock);
        printf("Registersper mp: %d\n", prop.regsPerBlock);
        printf("Threads in warp: %d\n", prop.warpSize);
        printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
        printf("Max thread dimensions: (%d, %d, %d)\n",
               prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("Max griddimensions: (%d, %d, %d)\n",
               prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("\n");
    }
    return 0;
}
22,036
// #ifdef __cplusplus
// extern "C" {
// #endif
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>

// Ceiling division: number of size-n chunks needed to cover m.
// BUG FIX: the original expanded to ((m) / (m) + ...), i.e. it divided m by
// itself instead of by n, so DIVUP always evaluated to 1 or 2.
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

// CUDA: grid-stride loop — correct for any launch configuration.
#define CUDA_1D_KERNEL_LOOP(i, n)                            \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)

// CUDA: grid stride looping (same idiom, parenthesized bound).
#define CUDA_KERNEL_LOOP(i, n)                                 \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)

// Forward pass of the PCL (proposal cluster learning) loss.
// One thread per class channel (nthreads == channels).
//  - channel 0 (background): sums -w_i * log(p_i0) over proposals labeled 0;
//  - other channels c with im_labels[c] != 0: sums the weighted negative log
//    probability of every proposal cluster assigned label c.
// NOTE(review): labels / pc_labels arrive as floats and are compared against
// integer indices — assumed to hold exact small integers; confirm at callers.
__global__ void PCLLossForward(
    const int nthreads, const float* bottom_data, const float* labels,
    const float* cls_loss_weights, const float* pc_labels,
    const float* pc_probs, const float* img_cls_loss_weights,
    const float* im_labels, const int batch_size, const int num_positive,
    float* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    top_data[index] = 0;
    if (im_labels[index] != 0) {
      if (index == 0) {
        for (int i = 0; i < batch_size; i++) {
          if (labels[i] == 0) {
            // FIX: logf — the original called double-precision log() on a
            // float operand inside a float kernel.
            top_data[index] -=
                cls_loss_weights[i] * logf(bottom_data[i * nthreads + index]);
          }
        }
      } else {
        for (int i = 0; i < num_positive; i++) {
          if (pc_labels[i] == index) {
            top_data[index] -= img_cls_loss_weights[i] * logf(pc_probs[i]);
          }
        }
      }
    }
  }
}

// Host launcher for PCLLossForward on the given stream.
// Returns 1 on success; aborts the process on a CUDA error.
int PCLLossForwardLaucher(
    const float* bottom_data, const float* labels,
    const float* cls_loss_weights, const float* pc_labels,
    const float* pc_probs, const float* img_cls_loss_weights,
    const float* im_labels, const int batch_size, const int channels,
    const int num_positive, float* top_data, cudaStream_t stream) {
  // PERF FIX: 4 threads/block is an eighth of a warp; 256 is a standard,
  // safe choice — correctness is unchanged because the kernel bounds-checks.
  const int kThreadsPerBlock = 256;
  cudaError_t err;

  PCLLossForward<<<(channels + kThreadsPerBlock - 1) / kThreadsPerBlock,
                   kThreadsPerBlock, 0, stream>>>(
      channels, bottom_data, labels, cls_loss_weights, pc_labels, pc_probs,
      img_cls_loss_weights, im_labels, batch_size, num_positive, top_data);

  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
  return 1;
}

// Backward pass of the PCL loss: one thread per (proposal, class) entry.
// Writes d(loss)/d(prob) into bottom_diff; zero where the class is inactive.
__global__ void PCLLossBackward(
    const int nthreads, const float* prob_data, const float* labels,
    const float* cls_loss_weights, const float* gt_assignment,
    const float* pc_labels, const float* pc_probs, const float* pc_count,
    const float* img_cls_loss_weights, const float* im_labels,
    const int channels, float* bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int i = index / channels;  // proposal row
    int c = index % channels;  // class channel
    bottom_diff[index] = 0;
    if (im_labels[c] != 0) {
      if (c == 0) {
        if (labels[i] == 0) {
          bottom_diff[index] = -cls_loss_weights[i] / prob_data[index];
        }
      } else {
        if (labels[i] == c) {
          int pc_index = gt_assignment[i];
          // Sanity check: the proposal's cluster must carry the same label.
          if (c != pc_labels[pc_index]) {
            printf("labels mismatch.\n");
          }
          bottom_diff[index] = -img_cls_loss_weights[pc_index] /
                               (pc_count[pc_index] * pc_probs[pc_index]);
        }
      }
    }
  }
}

// Host launcher for PCLLossBackward on the given stream.
// Returns 1 on success; aborts the process on a CUDA error.
int PCLLossBackwardLaucher(
    const float* top_diff, const float* prob_data, const float* labels,
    const float* cls_loss_weights, const float* gt_assignment,
    const float* pc_labels, const float* pc_probs, const float* pc_count,
    const float* img_cls_loss_weights, const float* im_labels,
    const int batch_size, const int channels, float* bottom_diff,
    cudaStream_t stream) {
  // PERF FIX: raised from 16 to 256 threads/block (sub-warp blocks waste
  // SM occupancy); the grid-stride loop keeps any configuration correct.
  const int kThreadsPerBlock = 256;
  int output_size = batch_size * channels;
  cudaError_t err;

  PCLLossBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
                    kThreadsPerBlock, 0, stream>>>(
      output_size, prob_data, labels, cls_loss_weights, gt_assignment,
      pc_labels, pc_probs, pc_count, img_cls_loss_weights, im_labels,
      channels, bottom_diff);

  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
  return 1;
}

// #ifdef __cplusplus
// }
// #endif
22,037
#ifdef D_CUDA
#include "ParticleKernel.cuh"
#include "Dot/Core.h"
#include <GL/glew.h>
#include <cuda_gl_interop.h>
#include <iostream>

namespace Dot {

	// Seeds one curand state per particle, zeroes its position, and draws a
	// random initial velocity in [-0.2, 0.2] per axis (y negated).
	// Requires state[] to hold at least `count` entries.
	__global__ void _Init(float3* pos, float3* vel, int count, curandState* state)
	{
		int id = blockDim.x * blockIdx.x + threadIdx.x;
		if (id < count)
		{
			pos[id] = make_float3(0, 0, 0);
			curand_init(1234, id, 0, &state[id]);
			float r0 = curand_uniform(&(state[id]));
			vel[id].x = r0 / 5;
			float r1 = curand_uniform(&(state[id]));
			vel[id].y = -r1 / 5;
			float r2 = curand_uniform(&(state[id]));
			vel[id].z = r2 / 5;
		}
	}

	// Euler-integrates positions; respawns a particle at the origin once any
	// coordinate leaves the [-30, 30] box.
	__global__ void _UpdateKernel(float3* pos, float3* vel, int count, float dt)
	{
		int id = blockDim.x * blockIdx.x + threadIdx.x;
		if (id < count)
		{
			pos[id].x += vel[id].x * dt;
			pos[id].y += vel[id].y * dt;
			pos[id].z += vel[id].z * dt;
			if (fabsf(pos[id].x) >= 30 || fabsf(pos[id].y) >= 30 || fabsf(pos[id].z) >= 30)
			{
				pos[id].x = 0;
				pos[id].y = 0;
				pos[id].z = 0;
			}
		}
	}

	DefaultParticleEffect::DefaultParticleEffect(unsigned int count)
		: m_count(count), m_positions(NULL), m_velocities(NULL)
	{
		CudaErrChk(cudaMallocManaged(&m_positions, sizeof(float3) * m_count));
		CudaErrChk(cudaMallocManaged(&m_velocities, sizeof(float3) * m_count));
		curandState* d_state;
		// BUG FIX: allocate one curandState per particle. The original
		// allocated a single state while _Init writes state[id] for every
		// thread, producing out-of-bounds device writes for all id > 0.
		CudaErrChk(cudaMalloc(&d_state, sizeof(curandState) * m_count));
		_Init << <m_count / 256 + 1, 256 >> > (m_positions, m_velocities, m_count, d_state);
		CudaErrChk(cudaDeviceSynchronize());
		CudaErrChk(cudaFree(d_state));
	}

	DefaultParticleEffect::~DefaultParticleEffect()
	{
		CudaErrChk(cudaDeviceSynchronize());
		CudaErrChk(cudaFree(m_positions));
		CudaErrChk(cudaFree(m_velocities));
		// NOTE(review): resets the whole device; any other live CUDA objects
		// in the process become invalid after this — confirm intent.
		cudaDeviceReset();
	}

	void DefaultParticleEffect::Launch(float dt)
	{
		_UpdateKernel << <m_count / 256 + 1, 256 >> > (m_positions, m_velocities, m_count, dt);
		CudaErrChk(cudaDeviceSynchronize());
	}

	// Same per-particle seeding/initialization as _Init, used by the
	// GL-interop effect.
	__global__ void _TestInit(float3* pos, float3* vel, int count, curandState* state)
	{
		int id = blockDim.x * blockIdx.x + threadIdx.x;
		if (id < count)
		{
			curand_init(1234, id, 0, &state[id]);
			pos[id] = make_float3(0, 0, 0);
			float r0 = curand_uniform(&(state[id]));
			vel[id].x = r0 / 5;
			float r1 = curand_uniform(&(state[id]));
			vel[id].y = -r1 / 5;
			float r2 = curand_uniform(&(state[id]));
			vel[id].z = r2 / 5;
		}
	}

	// Unbounded Euler integration (no respawn box, unlike _UpdateKernel).
	__global__ void _TestUpdateKernel(float3* pos, float3* vel, int count, float dt)
	{
		int id = blockDim.x * blockIdx.x + threadIdx.x;
		if (id < count)
		{
			pos[id].x += vel[id].x * dt;
			pos[id].y += vel[id].y * dt;
			pos[id].z += vel[id].z * dt;
		}
	}

	TestParticleEffect::TestParticleEffect(unsigned int count)
		: m_count(count)
	{
		glGenVertexArrays(1, &m_VAO);
		glBindVertexArray(m_VAO);
		glGenBuffers(1, &m_VBO);
		glBindBuffer(GL_ARRAY_BUFFER, m_VBO);
		glBufferData(GL_ARRAY_BUFFER, count * sizeof(float3), NULL, GL_DYNAMIC_DRAW);
		glEnableVertexAttribArray(0);
		glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
		glVertexAttribDivisor(0, 1);
		glBindBuffer(GL_ARRAY_BUFFER, 0);

		// NOTE(review): cudaGLRegisterBufferObject / cudaGLMapBufferObject are
		// the deprecated GL-interop API; kept for behavior compatibility.
		CudaErrChk(cudaGLRegisterBufferObject(m_VBO));
		curandState* d_state;
		CudaErrChk(cudaMalloc(&m_velocities, m_count * sizeof(float3)));
		// BUG FIX: one state per particle (see _Init); the original allocated
		// a single curandState and _TestInit wrote past it.
		CudaErrChk(cudaMalloc(&d_state, sizeof(curandState) * m_count));
		// BUG FIX: do NOT cudaMalloc m_positions here — the mapped GL buffer
		// supplies the storage, and the original allocation was immediately
		// leaked when cudaGLMapBufferObject overwrote the pointer.
		CudaErrChk(cudaGLMapBufferObject((void**)& m_positions, m_VBO));
		_TestInit << <m_count / 256 + 1, 256 >> > (m_positions, m_velocities, m_count, d_state);
		CudaErrChk(cudaGLUnmapBufferObject(m_VBO));
		CudaErrChk(cudaFree(d_state));
		glBindVertexArray(0);
		cudaDeviceSynchronize();
	}

	TestParticleEffect::~TestParticleEffect()
	{
		CudaErrChk(cudaDeviceSynchronize());
		CudaErrChk(cudaGLUnregisterBufferObject(m_VBO));
		// BUG FIX: m_positions points into the (now-unmapped) GL buffer, not
		// into a cudaMalloc allocation — freeing it was invalid. The buffer
		// itself is released via glDeleteBuffers below.
		CudaErrChk(cudaFree(m_velocities));
		glDeleteBuffers(1, &m_VBO);
		glDeleteBuffers(1, &m_VAO);
	}

	void TestParticleEffect::Render(float dt)
	{
		cudaDeviceSynchronize();
		// Map the VBO so the kernel can integrate positions in place.
		CudaErrChk(cudaGLMapBufferObject((void**)& m_positions, m_VBO));
		_TestUpdateKernel << <m_count / 256 + 1, 256 >> > (m_positions, m_velocities, m_count, dt);
		CudaErrChk(cudaGLUnmapBufferObject(m_VBO));
		cudaDeviceSynchronize();
		glBindVertexArray(m_VAO);
		glDrawArraysInstanced(GL_POINTS, 0, 1, m_count);
		glBindVertexArray(0);
	}

	void TestParticleEffect::Launch(float dt)
	{
	}

}
#endif
22,038
#include <fstream> #include <string> #include <iostream> #include <stdint.h> #include <math.h> #include <utility> #include <sys/time.h> #include <limits> #include <stdlib.h> #include <unistd.h> #include <sstream> #include <map> #include <bitset> using namespace std; #define STEPSIZE 1 // step size in pixels, e.g. 2 = every second pixel // compile with: // make && ./main s11e121.txt 0 0 struct Point{ short row; short col; }; struct Pixel{ int elevation; int mountingHeight; }; struct Terrain{ short nRows; short nCols; float cellsize; float xllcorner; float yllcorner; double noDataValue; Point *towerLocations; Pixel *gridTerrian; }; clock_t start = 0, endt; double elapsed; void Print_Time() { endt = clock(); elapsed = ((double)(endt - start)) / CLOCKS_PER_SEC; start = endt; cerr << "GPU Time: " << elapsed << endl; } #define CUDA_CALL(cuda_function, ...) { \ cudaError_t status = cuda_function(__VA_ARGS__); \ cudaEnsureSuccess(status, #cuda_function, true, __FILE__, __LINE__); \ } bool cudaEnsureSuccess(cudaError_t status, const char* status_context_description, bool die_on_error, const char* filename, unsigned line_number) { if (status_context_description == NULL) status_context_description = ""; if (status == cudaSuccess) { #if REPORT_CUDA_SUCCESS cerr << "Succeeded: " << status_context_description << std::endl << std::flush; #endif return true; } const char* errorString = cudaGetErrorString(status); cerr << "CUDA Error: "; if (status_context_description != NULL) { cerr << status_context_description << ": "; } if (errorString != NULL) { cerr << errorString; } else { cerr << "(Unknown CUDA status code " << status << ")"; } if (filename != NULL) { cerr << filename << ":" << line_number; } cerr << std::endl; if(die_on_error) { exit(EXIT_FAILURE); // ... or cerr << "FATAL ERROR" << etc. etc. 
} return false; } void printCudaMemory(char* info) { size_t free_byte ; size_t total_byte ; cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ; if ( cudaSuccess != cuda_status ){ printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; printf("[%s]GPU memory usage: used = %f, free = %f MB, total = %f MB\n", info, used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); } string remove_multiple_spaces(string input){ string output; int k = 0, i = 0; for(i = 0,k=0; i < input.length(); i++, k++){ if(input[i] == ' ' && input[i+1] == ' '){ k--; continue; } output += input[i]; } output += '\0'; return output; } string breakStringByspace(string input){ bool spaceCome = false; string output; for(int i = 0; i < input.length(); i++){ if(spaceCome){ output += input[i]; } if(input[i] == ' '){ spaceCome = true; } } output += '\0'; return output; } void splitStringBySpace(string input, int * output, int nCols){ string elev = ""; int elevationindex = 0; int startCol = 0; // ignore space at beginning of line (happens when exporting file from GRASS) if (input[0] == ' ') { startCol = 1; } for(int i = startCol; i < input.length(); i++){ if(input[i] == ' '){ output[elevationindex] = atoi(elev.c_str()); elevationindex++; elev = ""; continue; } elev += input[i]; if(i == input.length() - 1){ output[elevationindex] = atoi(elev.c_str()); } } } void setBitHost(unsigned char *A, unsigned long long int k) { A[k/8] |= 1 << (k%8); } __device__ void setBit(unsigned char *A, unsigned long long int k) { A[k/8] |= 1 << (k%8); } short getBit(unsigned char *A, unsigned long long int k) { return ( (A[k/8] & (1 << (k%8) )) != 0 ) ; } void readFileAndReturnTerrain(Terrain *h_terrain, char* file_name, int towerHeight, char* pointHeighMappingFilePath){ ifstream fin; fin.open(file_name); // assume the file is in AAIGrid format 
string line_ncols, line_nrows, line_xllcorner, line_yllcorner, line_dx,line_dy, line_nodata; getline(fin, line_ncols); getline(fin, line_nrows); getline(fin, line_xllcorner); getline(fin, line_yllcorner); getline(fin, line_dx); getline(fin, line_dy); getline(fin, line_nodata); line_ncols = remove_multiple_spaces(line_ncols); line_nrows = remove_multiple_spaces(line_nrows); if(line_nodata.find("NODATA_value") != string::npos){ line_nodata = remove_multiple_spaces(line_nodata); } int nCols = atoi(breakStringByspace(line_ncols).c_str()); int nRows = atoi(breakStringByspace(line_nrows).c_str()); float xllcorner = atof(breakStringByspace(line_xllcorner).c_str()); float yllcorner = atof(breakStringByspace(line_yllcorner).c_str()); // for easier calculations later on, make sure we are dealing only with an even grid size (ignore the last row and column) h_terrain->nCols = nCols; //% 2 == 0 ? nCols : nCols-1; h_terrain->nRows = nRows; //% 2 == 0 ? nRows : nRows-1; h_terrain->xllcorner = xllcorner; h_terrain->yllcorner = yllcorner; if(line_nodata.find("NODATA_value") == string::npos){ h_terrain->noDataValue = 65535; }else{ h_terrain->noDataValue = atoi(breakStringByspace(line_nodata).c_str()); } h_terrain->gridTerrian = (Pixel*) malloc((h_terrain->nRows * h_terrain->nCols) * sizeof(Pixel)); int rowIndex = 0; int * pixelInRow = (int*) malloc(h_terrain->nCols * sizeof(int)); // loop through the file and break when finish or nRow-1 is reached if(line_nodata.find("NODATA_value") == string::npos){ splitStringBySpace(line_nodata, pixelInRow, h_terrain->nCols); for(int j = 0; j < h_terrain->nCols; j++){ h_terrain->gridTerrian[(h_terrain->nCols * rowIndex) + j].elevation = pixelInRow[j]; } rowIndex++; } while(getline(fin, line_ncols) && rowIndex != h_terrain->nRows){ splitStringBySpace(line_ncols, pixelInRow, h_terrain->nCols); for(int j = 0; j < h_terrain->nCols; j++){ h_terrain->gridTerrian[(h_terrain->nCols * rowIndex) + j].elevation = pixelInRow[j]; } rowIndex++; } 
free(pixelInRow); /* * Print the grid for testing * for(int i = 0; i < h_terrain->nRows; i++){ for(int j = 0; j < h_terrain->nCols; j++){ cout << h_terrain->gridTerrian[i * h_terrain->nRows + j].elevation << " "; } cout << endl; } exit(1); * * */ ///// ifstream pointHeighMappingFile(pointHeighMappingFilePath); map<long, int> towerHeightMap; string line;long towerIndex; int height; while (pointHeighMappingFile >> line){ stringstream ss(line); string token; getline(ss, token, ','); towerIndex = stol(token); getline(ss, token, ','); height = stoi(token); towerHeightMap[towerIndex] = height; } pointHeighMappingFile.close(); map<long, int>::iterator it; for(it = towerHeightMap.begin(); it != towerHeightMap.end(); ++it){ std::cout << it->first << " => " << it->second << '\n'; } ///// h_terrain->towerLocations = (Point*) malloc((h_terrain->nRows * h_terrain->nCols) * sizeof(Point)); towerIndex = 0; for(int i = 0; i < h_terrain->nRows; i++){ for(int j = 0; j < h_terrain->nCols; j++){ // cout << h_terrain->gridTerrian[h_terrain->nCols * i + j].elevation << " "; if(towerHeightMap.count((h_terrain->nCols * i) + j) > 0){ h_terrain->gridTerrian[(h_terrain->nCols * i) + j].mountingHeight = towerHeightMap[(h_terrain->nCols * i) + j]; }else{ h_terrain->gridTerrian[(h_terrain->nCols * i) + j].mountingHeight = towerHeight; } h_terrain->towerLocations[towerIndex].row = i; h_terrain->towerLocations[towerIndex].col = j; towerIndex++; } // cout << endl; } //testing of mounting height // for(int i = 0; i < h_terrain->nRows; i++){ // for(int j = 0; j < h_terrain->nCols; j++){ // cout << "Height of tower " << (h_terrain->nCols * i) + j << " is: " << h_terrain->gridTerrian[(h_terrain->nCols * i) + j].mountingHeight << endl; // } // } //For testing tower position //cout << h_terrain->towerLocations[7].row << h_terrain->towerLocations[7].col << endl; //cout << h_terrain->gridTerrian[(h_terrain->nCols * h_terrain->towerLocations[7].row) + h_terrain->towerLocations[7].col].elevation 
fin.close(); } __device__ double calculateGradientOnLine(Pixel *inputTerrain, Point *observer, Point *target, int width){ double distanceFromObserverToTarget = sqrtf(powf( (float) (target->row - observer->row) , 2.0) + powf( (float) (target->col - observer->col) , 2.0)); int gridIndexTarget = (width * target->row) + target->col; int gridIndexObserver = (width * observer->row) + observer->col; double targetTotalElevation = inputTerrain[gridIndexTarget].elevation + inputTerrain[gridIndexTarget].mountingHeight; double observerTotalElevation = inputTerrain[gridIndexObserver].elevation + inputTerrain[gridIndexObserver].mountingHeight; return (targetTotalElevation - observerTotalElevation) / distanceFromObserverToTarget; } __device__ double calculateGradientOnLineWithoutTowerHeight(Pixel *inputTerrain, Point *observer, Point *target, int width){ double distanceFromObserverToTarget = sqrtf(powf( (float) (target->row - observer->row) , 2.0) + powf( (float) (target->col - observer->col) , 2.0)); int gridIndexTarget = (width * target->row) + target->col; int gridIndexObserver = (width * observer->row) + observer->col; double targetTotalElevation = inputTerrain[gridIndexTarget].elevation; double observerTotalElevation = inputTerrain[gridIndexObserver].elevation + inputTerrain[gridIndexObserver].mountingHeight; return (targetTotalElevation - observerTotalElevation) / distanceFromObserverToTarget; } int getBoundaryAroundObserver(int nRows, int nCols, Point *observerBoundary){ int topRow = 0; int bottomRow = nRows-1; int leftCol = 0; int rhtCol = nCols-1; //storing boundary points around observer int observerBoundaryIndex = 0; for(int i = leftCol; i <= rhtCol; i = i + STEPSIZE ){ Point p; p.row = topRow; p.col = i; observerBoundary[observerBoundaryIndex] = p; observerBoundaryIndex++; } for(int i = topRow+1; i <= bottomRow; i = i + STEPSIZE){ Point p; p.row = i; p.col = rhtCol; observerBoundary[observerBoundaryIndex] = p; observerBoundaryIndex++; } for(int i = rhtCol-1; i >= 
leftCol; i = i - STEPSIZE){ Point p; p.row = bottomRow; p.col = i; observerBoundary[observerBoundaryIndex] = p; observerBoundaryIndex++; } for(int i = bottomRow-1; i > topRow; i = i - STEPSIZE){ Point p; p.row = i; p.col = leftCol; observerBoundary[observerBoundaryIndex] = p; observerBoundaryIndex++; } return observerBoundaryIndex; } __device__ void my_swap(float &x, float &y){ float temp = 0.0; temp = x; x = y; y = temp; } __device__ int getPointsOnLine(Point *start, Point *end, Point *allPointsInLine, short nCols){ // Bresenham's line algorithm float x1 = start->row, y1 = start->col, x2 = end->row, y2 = end->col; //CHECK: these values are actually short const bool steep = (fabs(y2 - y1) > fabs(x2 - x1)); if(steep) { my_swap(x1, y1); my_swap(x2, y2); } if(x1 > x2) { my_swap(x1, x2); my_swap(y1, y2); } const float dx = x2 - x1; const float dy = fabs(y2 - y1); float error = dx / 2.0f; const int ystep = (y1 < y2) ? 1 : -1; int y = (int)y1; const int maxX = (int)x2; int steps = 0; for(int x=(int)x1; x<maxX; x++) { if(steep) { allPointsInLine[steps].row = (int)y; allPointsInLine[steps].col = (int)x; } else { allPointsInLine[steps].row = (int)x; allPointsInLine[steps].col = (int)y; } error -= dy; if(error < 0) { y += ystep; error += dx; } steps++; } return steps; } __global__ void calculateViewshed(Terrain terrain, unsigned char* d_viewshed, Point *viewshedBoundary, int boundarySize, int size, int iter, int r, Point* d_pointsOnLine, short sizeOfLine){ unsigned long long int observerIndex = blockDim.x * blockIdx.x + threadIdx.x; // unsigned long long int yIndex = blockDim.y * blockIdx.y + threadIdx.y; observerIndex = observerIndex + iter; if (observerIndex >= iter + r){ // printf("exiting %llu\n", observerIndex); return; } // printf("%llu\n", observerIndex); unsigned long long int viewshedIndex = terrain.nRows * terrain.nCols * (observerIndex - iter); Point observerPoint = terrain.towerLocations[observerIndex]; //skip no data values 
if(terrain.gridTerrian[observerPoint.row * terrain.nCols + observerPoint.col].elevation == terrain.noDataValue){ return; } for(int i = 0; i < boundarySize; i++){ int stepsOnLine = getPointsOnLine(&observerPoint, &viewshedBoundary[i], &d_pointsOnLine[(observerIndex - iter) * sizeOfLine], terrain.nCols); double maxGradientBetweenObserverAndTarget = PTRDIFF_MIN; //for(int j = stepsOnLine - 1; j >= 0; j--){ for(int j = 1; j < stepsOnLine; j++){ unsigned long long int grid_index = (terrain.nCols * d_pointsOnLine[(observerIndex - iter) * sizeOfLine +j].row) + d_pointsOnLine[(observerIndex - iter) * sizeOfLine +j].col; if(terrain.gridTerrian[grid_index].elevation != terrain.noDataValue){ double gradient = calculateGradientOnLine(terrain.gridTerrian, &observerPoint, &d_pointsOnLine[(observerIndex - iter) * sizeOfLine +j], terrain.nCols); double gradientWithoutTowerHeight = calculateGradientOnLineWithoutTowerHeight(terrain.gridTerrian, &observerPoint, &d_pointsOnLine[(observerIndex - iter) * sizeOfLine +j], terrain.nCols); // if(observerIndex == 0 && i == 19){ // printf("%d,%f,%f\n",j, gradient, gradientWithoutTowerHeight); // } if(gradient >= maxGradientBetweenObserverAndTarget){ setBit(d_viewshed, viewshedIndex + grid_index); maxGradientBetweenObserverAndTarget = gradientWithoutTowerHeight; // maxGradientBetweenObserverAndTarget = gradient; } } } //calculating for the last point unsigned long long int grid_index = (terrain.nCols * viewshedBoundary[i].row) + viewshedBoundary[i].col; if(terrain.gridTerrian[grid_index].elevation != terrain.noDataValue){ double gradient = calculateGradientOnLine(terrain.gridTerrian, &observerPoint, &viewshedBoundary[i], terrain.nCols); double gradientWithoutTowerHeight = calculateGradientOnLineWithoutTowerHeight(terrain.gridTerrian, &observerPoint, &viewshedBoundary[i], terrain.nCols); if(gradient >= maxGradientBetweenObserverAndTarget){ setBit(d_viewshed, viewshedIndex + grid_index); maxGradientBetweenObserverAndTarget = 
gradientWithoutTowerHeight; } } } } int getIndexOfTower(Point *towerLocations, int size, int row, int col){ int index = -1; for(int i = 0; i < size; i++){ if(towerLocations[i].row == row && towerLocations[i].col == col){ index = i; } } return index; } int main(int argc, char* argv[]){ int dev = 1; CUDA_CALL(cudaSetDevice, dev); cudaError_t err = cudaSuccess; Terrain h_terrain; Pixel *d_pixel_grid, *h_pixel_grid; Point *d_towers, *h_towers; if(argc < 2){ cout << "Not enough arguments!" << endl; cout << "example: ./main /path/to/file" << endl; return 1; } cout << "Reading Input File...." << endl; readFileAndReturnTerrain(&h_terrain, argv[1], atoi(argv[3]), argv[5]); //exit(1); cout << "After file output" << endl; h_pixel_grid = h_terrain.gridTerrian; h_towers = h_terrain.towerLocations; int THREADS = atoi(argv[2]); //4 unsigned long long int totalThreads = THREADS; cout << "totalThreads: " << totalThreads << endl; cout << "nRows: " << h_terrain.nRows << ", nCols: " << h_terrain.nCols << endl; float lenGlobal = (float)(h_terrain.nRows*h_terrain.nCols)/8; std::cout << "MemLength " << lenGlobal<< std::endl; printCudaMemory((char*)"1"); CUDA_CALL(cudaMalloc, (void**)&d_pixel_grid, (h_terrain.nRows * h_terrain.nCols) * sizeof(Pixel)); CUDA_CALL(cudaMemcpy, d_pixel_grid, h_pixel_grid, (h_terrain.nRows * h_terrain.nCols) * sizeof(Pixel), cudaMemcpyHostToDevice); printCudaMemory((char*)"2"); CUDA_CALL(cudaMalloc, (void**)&d_towers, totalThreads * sizeof(Point)); CUDA_CALL(cudaMemcpy, d_towers, h_towers, totalThreads * sizeof(Point), cudaMemcpyHostToDevice); printCudaMemory((char*)"3"); h_terrain.towerLocations = d_towers; h_terrain.gridTerrian = d_pixel_grid; int boundarySize = (h_terrain.nCols + h_terrain.nRows) / STEPSIZE; Point *h_viewshedBoundary, *d_viewshedBoundary; h_viewshedBoundary = (Point*) malloc( ((boundarySize) * 2) * sizeof(Point)); int totalBoundarySize = getBoundaryAroundObserver(h_terrain.nRows, h_terrain.nCols, h_viewshedBoundary); CUDA_CALL(cudaMalloc, 
(void**)&d_viewshedBoundary, ((boundarySize) * 2) * sizeof(Point)); CUDA_CALL(cudaMemcpy, d_viewshedBoundary, h_viewshedBoundary, ((boundarySize ) * 2) * sizeof(Point), cudaMemcpyHostToDevice); printCudaMemory((char*)"4"); cout << "Total Boundary Size: " << totalBoundarySize << endl; cout << "Starting Kernel with # of threads " << totalThreads<<endl; size_t heap; CUDA_CALL(cudaDeviceGetLimit, &heap, cudaLimitMallocHeapSize); cout << "Heap size before = " << heap << endl; // this is dirty fix // CUDA_CALL(cudaDeviceSetLimit, cudaLimitMallocHeapSize, heap*max(1,THREADS/1000)); // CUDA_CALL(cudaDeviceGetLimit, &heap, cudaLimitMallocHeapSize); cout << "Heap size after = " << heap << endl; cudaDeviceProp myCUDA; if (cudaGetDeviceProperties(&myCUDA, dev) == cudaSuccess) { printf("Using device %d:\n", dev); printf("%s; global mem: %zdByte; compute v%d.%d; clock: %d kHz\n", myCUDA.name, myCUDA.totalGlobalMem, (int)myCUDA.major, (int)myCUDA.minor, (int)myCUDA.clockRate); } int threadsPerBlock = myCUDA.maxThreadsPerBlock; int blocksPerGrid = (totalThreads + threadsPerBlock - 1) / threadsPerBlock; cout << "Maximum threads per block = " << threadsPerBlock << endl; cout << "Blocks per Grid = " << blocksPerGrid << endl; cout << "Size of global viewshed" << ceil(totalThreads * lenGlobal *sizeof(unsigned char)) << endl; unsigned char *g_viewshed = (unsigned char *) calloc(ceil(totalThreads * lenGlobal *sizeof(unsigned char)), sizeof(unsigned char)); if (g_viewshed == NULL) { std::cout << "Memory allocation failed" << std::endl; exit(1); } unsigned long long int MAX_ELEMENTS = 250000UL * 250000UL; //25600000000; // 500x500 int r = max(int(MAX_ELEMENTS / totalThreads), 1); //make sure we process at least 1 row printf("Rows to process in each iteration %d\n", r); printf("Iterations %f\n", ceil(totalThreads * totalThreads / MAX_ELEMENTS)); for (int i = 0; i < totalThreads; i+=r) { //step float len = (float)(totalThreads)/8; printf("Start Iteration %d, %f , %f\n", i/r,len,ceil(r * len 
*sizeof(unsigned char)) ); // std::cout << r << " " << len << " " << ceil(r * len *sizeof(unsigned char)) << std::endl; // we are in the last thread, only process the remaining viewsheds if (i+r > totalThreads) { r = totalThreads - i; // std::cout << "Remainder " << r << std::endl; } unsigned char *h_viewshed, *d_viewshed; // round up to the next full byte h_viewshed = (unsigned char *) calloc(ceil(r * len *sizeof(unsigned char)), sizeof(unsigned char)); // cout << "viewshed size " << r * len *sizeof(unsigned char) * 8 << " elements --> allocated memory: " << ceil(r * len *sizeof(unsigned char)) << " bytes" << endl; // std::cout << "MemLength~~ " <<ceil(r * len *sizeof(unsigned char)) << std::endl; CUDA_CALL(cudaMalloc, (unsigned char**)&d_viewshed, (ceil(r * len *sizeof(unsigned char)))); CUDA_CALL(cudaMemcpy, d_viewshed, h_viewshed, ceil(r* len *sizeof(unsigned char)), cudaMemcpyHostToDevice); printCudaMemory((char*)"Viewshed"); // allocate memory for the max number of possible points, i.e. 
map size int sizeOfLine = sqrt(pow(h_terrain.nRows , 2.0) + pow(h_terrain.nCols, 2.0) ); Point *h_pointsOnLine,*d_pointsOnLine; h_pointsOnLine = (Point*)malloc(r * sizeOfLine * sizeof(Point)); //FIXME CUDA_CALL(cudaMalloc, (Point **)&d_pointsOnLine, (r * sizeOfLine * sizeof(Point))); CUDA_CALL(cudaMemcpy, d_pointsOnLine, h_pointsOnLine, r* sizeOfLine *sizeof(Point), cudaMemcpyHostToDevice); printCudaMemory((char*)"Viewshed"); std::cout << "Iteration Start" << std::endl; Print_Time(); // calculateViewshed<<<blocksPerGrid, threadsPerBlock>>>(h_terrain, d_viewshed, d_viewshedBoundary, totalBoundarySize, totalThreads, i, r); calculateViewshed<<<blocksPerGrid, threadsPerBlock>>>(h_terrain, d_viewshed, d_viewshedBoundary, totalBoundarySize, totalThreads, i, r, d_pointsOnLine, sizeOfLine); cudaError_t errSync = cudaGetLastError(); if (errSync != cudaSuccess) { printf("Sync kernel error: %s\n", cudaGetErrorString(errSync)); exit(EXIT_FAILURE); } cout << "Waiting for all jobs to finish..." << endl; cudaError_t errAsync = cudaDeviceSynchronize(); if (errAsync != cudaSuccess) { printf("Async kernel error: %s\n", cudaGetErrorString(errAsync)); exit(EXIT_FAILURE); } std::cout << "Iteration stop" << std::endl; Print_Time(); cout << "Copy viewsheds from device to host" << endl; err = cudaMemcpy(h_viewshed, d_viewshed, ceil(r* len *sizeof(unsigned char)), cudaMemcpyDeviceToHost); if (err != cudaSuccess){ cout << "Failed to copy viewsheds grid from device to host. 
Error String " << cudaGetErrorString(err) << err << endl; exit(EXIT_FAILURE); } cout << "Viewshed calculation complete" << endl; // Print_Time(); // memcpy(g_viewshed + (r * len * sizeof(unsigned char)), h_viewshed, ceil(r* len *sizeof(unsigned char))); // exit(1); // commenting it due to seg fault after iteration 1 unsigned long long int count = 0; for(unsigned long long int k = (i * totalThreads); k < ( (unsigned long long int)(i * totalThreads) + (r * totalThreads) ); k++){ // std::cout << k << " " <<getBit(h_viewshed, count)<< std::endl; // g_viewshed.set(k, getBit(h_viewshed, count)); if(getBit(h_viewshed, count)){ // std::cout << k << std::endl; setBitHost(g_viewshed,k); } count++; // g_viewshed[k] = getBit(h_viewshed, j * (h_terrain.nRows * h_terrain.nCols) + k) } // FILE *fp; // char buff[10]; // sprintf(buff,"%d",i); // fp = fopen(buff, "wb"); // // long tRows = (h_terrain.nRows * h_terrain.nCols); // for(int j = r-1; j < r; j++){ // for(int k = 0; k < totalThreads; k++){ // // unsigned long long int index = ((h_terrain.nRows * h_terrain.nCols) * (unsigned long long int) indexOfTower) + ((h_terrain.nCols * j) + k); // unsigned long long int mainIndex = j * totalThreads + k; // if(getBit(h_viewshed, mainIndex)){ // // cout << "1 " << mainIndex << endl; // fprintf(fp, "1 "); // } // else{ // // cout << "0 " << mainIndex << endl; // fprintf(fp, "0 "); // } // } // fprintf(fp, "\n"); // } // fclose(fp); free(h_viewshed); free(h_pointsOnLine); cudaFree(d_pointsOnLine); cudaFree(d_viewshed); } // std::cout << "Writing to file..." 
<< std::endl; // FILE *fp; // fp = fopen("out.txt", "wb"); // for(int j = 0; j < totalThreads; j++){ // for(int k = 0; k < totalThreads; k++){ // // unsigned long long int index = ((h_terrain.nRows * h_terrain.nCols) * (unsigned long long int) indexOfTower) + ((h_terrain.nCols * j) + k); // if(getBit(g_viewshed, j * (h_terrain.nRows * h_terrain.nCols) + k)){ // fprintf(fp, "1 "); // } // else // fprintf(fp, "0 "); // } // fprintf(fp, "\n"); // } // fclose(fp); // for(int j = 0; j < totalThreads; j++){ // for(int k = 0; k < totalThreads; k++){ // // unsigned long long int index = ((h_terrain.nRows * h_terrain.nCols) * (unsigned long long int) indexOfTower) + ((h_terrain.nCols * j) + k); // if(getBit(g_viewshed, j * (h_terrain.nRows * h_terrain.nCols) + k)){ // cout << "1 "; // } // else{ // cout << "0 "; // } // } // cout << endl; // } std::cout << "Writing to binary file..." << std::endl; FILE * write_ptr; write_ptr = fopen(argv[4],"wb"); fwrite(g_viewshed, sizeof(unsigned char), ceil(totalThreads * lenGlobal), write_ptr); fclose(write_ptr); free(g_viewshed); cudaFree(d_pixel_grid); cudaFree(d_towers); cudaFree(d_viewshedBoundary); free(h_pixel_grid); free(h_towers); free(h_viewshedBoundary); // reset the device err = cudaDeviceReset(); return 0; }
22,039
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <fstream>
#include <chrono>
#include <iostream>

// One-element-per-thread vector add (kept for reference; not launched below).
__global__ void vectorAdd(const double *A, const double *B, double *C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
    {
        C[i] = A[i] + B[i];
    }
}

// Grid-stride vector add: correct for any grid size and any N.
__global__ void addVectorsInto(double *a, double *b, double *result, int N)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < N; i += stride)
    {
        result[i] = a[i] + b[i];
    }
}

// Abort-on-error check for CUDA runtime calls.
inline cudaError_t checkCUDA(cudaError_t result)
{
    if (result != cudaSuccess)
    {
        fprintf(stderr, "CUDA Runtime error: %s\n", cudaGetErrorString(result));
        assert(result == cudaSuccess);
    }
    return result;
}

// Baseline: managed allocations, host-side initialization, no prefetching.
void vectorAddNorm(int N, int numberOfSMs)
{
    double *A, *B, *result;
    size_t size = N * sizeof(double);

    checkCUDA(cudaMallocManaged(&A, size));
    checkCUDA(cudaMallocManaged(&B, size));
    checkCUDA(cudaMallocManaged(&result, size));

    for (int j = 0; j < N; j++)
    {
        A[j] = static_cast<double>(rand()) / (RAND_MAX / 1.);
        B[j] = static_cast<double>(rand()) / (RAND_MAX / 1.);
        result[j] = 0;
    }

    addVectorsInto<<<32 * numberOfSMs, 256>>>(A, B, result, N);
    checkCUDA(cudaGetLastError());       // fix: launch errors were never checked
    checkCUDA(cudaDeviceSynchronize());  // fix: async execution errors were never checked

    checkCUDA(cudaFree(A));
    checkCUDA(cudaFree(B));
    checkCUDA(cudaFree(result));
}

// Benchmark variant: prefetch `pre` (0..3) of the three managed buffers to the
// GPU before use. pre==3 prefetches result, B and A; pre==2 B and A; pre==1 A only.
//
// NOTE(review): the prefetches are issued BEFORE the host initialization loop,
// so the pages migrate to the device and then fault back when the host writes
// them — presumably this ordering is the scenario being measured; confirm intent.
void vectorAddPre(int N, int numberOfSMs, int deviceId, int pre)
{
    double *A, *B, *result;
    size_t size = N * sizeof(double);

    checkCUDA(cudaMallocManaged(&A, size));
    checkCUDA(cudaMallocManaged(&B, size));
    checkCUDA(cudaMallocManaged(&result, size));

    // Deliberate fall-through reproduces the original per-case prefetch sets.
    switch (pre)
    {
        case 3:
            checkCUDA(cudaMemPrefetchAsync(result, size, deviceId)); // fix: return codes were ignored
            // fall through
        case 2:
            checkCUDA(cudaMemPrefetchAsync(B, size, deviceId));
            // fall through
        case 1:
            checkCUDA(cudaMemPrefetchAsync(A, size, deviceId));
            break;
        default:
            break;
    }

    for (int j = 0; j < N; j++)
    {
        A[j] = static_cast<double>(rand()) / (RAND_MAX / 1.);
        B[j] = static_cast<double>(rand()) / (RAND_MAX / 1.);
        result[j] = 0;
    }

    addVectorsInto<<<32 * numberOfSMs, 256>>>(A, B, result, N);
    checkCUDA(cudaGetLastError());       // fix: launch errors were never checked
    checkCUDA(cudaDeviceSynchronize());  // fix: async execution errors were never checked

    checkCUDA(cudaFree(A));
    checkCUDA(cudaFree(B));
    checkCUDA(cudaFree(result));
}

// Times one vectorAddPre configuration and appends "pre <TAB> N <TAB> seconds"
// to res.txt (and stdout).
int main()
{
    std::ofstream save;
    // fix: the original declared these as system_clock::time_point but assigned
    // high_resolution_clock::now(), which only compiles where the two clocks
    // are aliases (libstdc++); use the matching clock type.
    std::chrono::high_resolution_clock::time_point start;
    std::chrono::high_resolution_clock::time_point stop;
    std::chrono::duration<double> elapsed_time;

    int deviceId;
    int numberOfSMs;
    checkCUDA(cudaGetDevice(&deviceId));
    checkCUDA(cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId));
    printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs);

    save.open("res.txt");

    int i = 1e8; // problem size (elements)
    int p = 3;   // prefetch configuration, see vectorAddPre

    start = std::chrono::high_resolution_clock::now();
    vectorAddPre(i, numberOfSMs, deviceId, p);
    stop = std::chrono::high_resolution_clock::now();
    elapsed_time = stop - start;

    save << p << "\t" << i << "\t" << elapsed_time.count() << std::endl;
    std::cout << p << "\t" << i << "\t" << elapsed_time.count() << std::endl;

    save.close();
    return 0;
}
22,040
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <vector>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <assert.h>
#include <iostream> // fix: print_matrix uses std::cout but <iostream> was never included

// Result of an (unpivoted) LU factorization: A ~= L * U, both stored densely
// in row-major order as n*n element vectors.
struct LU {
    LU() = default;
    std::vector<double> L;
    std::vector<double> U;
    size_t n;
};

// Borrowed from the transformations example code
// https://docs.nvidia.com/cuda/thrust/index.html#transformations
struct daxpy_functor {
    const double a;
    daxpy_functor(double _a) : a(_a) {}
    __host__ __device__ double operator()(const double& x, const double& y) const {
        return a * x + y;
    }
};

// Y <- a * X + Y, element-wise, on the device.
void daxpy(double A, thrust::device_vector<double>& X, thrust::device_vector<double>& Y)
{
    thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), daxpy_functor(A));
}

// Clamp values within +/- epsilon of zero to exactly zero.
struct to_zero_functor {
    const double epsilon;
    to_zero_functor(double _epsilon) : epsilon(_epsilon) {}
    __host__ __device__ double operator()(const double& x) const {
        if (std::abs(x) <= epsilon) return 0;
        else return x;
    }
};

void to_zero(double epsilon, thrust::device_vector<double>& X)
{
    thrust::transform(X.begin(), X.end(), X.begin(), to_zero_functor(epsilon));
}

LU LU_factorization(const std::vector<double>& A, size_t n);
void print_matrix(const std::vector<double>& A, size_t n);
double rand_0_1(void);

int main(int argc, char **argv)
{
    // Matrix dimension from argv[1], default 1024.
    int n = (argc >= 2) ? atoi(argv[1]) : 1024;

    std::vector<double> A(n * n);
    std::srand(std::time(nullptr));
    std::generate(A.begin(), A.end(), rand_0_1);

    auto factored = LU_factorization(A, n);
    return 0;
}

// Uniform random double in [0, 1].
double rand_0_1(void)
{
    return ((double) rand() / (RAND_MAX));
}

// Print an n x n row-major matrix, one row per line.
void print_matrix(const std::vector<double>& A, size_t n)
{
    for (size_t row = 0; row < n; ++row) {
        for (size_t col = 0; col < n; ++col) {
            std::cout << A[n * row + col] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}

// Doolittle-style LU factorization without pivoting. The row updates are
// offloaded to the device via daxpy; everything else stays on the host.
// NOTE: with no pivoting, a (near-)zero diagonal entry produces inf/nan
// multipliers — the caller must supply a suitable matrix.
LU LU_factorization(const std::vector<double>& A, const size_t n)
{
    assert(A.size() == n * n);

    std::vector<double> U(n * n);
    std::vector<double> L(n * n, 0);

    // U starts as a copy of A; L starts as the identity.
    std::copy(A.begin(), A.end(), U.begin());
    for (size_t i = 0; i < n; ++i) {
        L[i * (n + 1)] = 1.0;
    }

    thrust::host_vector<double> top_row_host(n);
    thrust::device_vector<double> top_row_dev(n);
    thrust::host_vector<double> reducing_row_host(n);
    thrust::device_vector<double> reducing_row_dev(n);

    for (size_t col = 0; col < n - 1; ++col) {
        // Pivot row for this elimination stage.
        std::copy(U.begin() + (col * n), U.begin() + ((col + 1) * n), top_row_host.begin());
        top_row_dev = top_row_host;

        for (size_t row = col + 1; row < n; ++row) { // fix: was `int row` compared against size_t n
            size_t num_coeff = row * n + col;
            size_t den_coeff = col * n + col;
            double coeff = -(U[num_coeff] / U[den_coeff]);
            // fix: store the POSITIVE multiplier so that A == L * U; the
            // original stored the negated coefficient, giving every
            // off-diagonal entry of L the wrong sign.
            L[num_coeff] = -coeff;

            // Copy the tail of the target row (columns col..n-1) to the
            // device, zero-padding the leading entries.
            size_t start_loc = row * n + col;
            size_t end_loc = start_loc + (n - col);
            thrust::fill(reducing_row_host.begin(), reducing_row_host.begin() + col, 0);
            std::copy(U.begin() + start_loc, U.begin() + end_loc, reducing_row_host.begin() + col);
            reducing_row_dev = reducing_row_host;

            // row <- coeff * pivot_row + row  (eliminates U[row][col])
            daxpy(coeff, top_row_dev, reducing_row_dev);
            reducing_row_host = reducing_row_dev;
            thrust::copy(reducing_row_host.begin() + col, reducing_row_host.end(), U.begin() + start_loc);
        }
    }

    // Round tiny residuals in U down to exact zeros (on the device).
    thrust::host_vector<double> final_host(n * n);
    std::copy(U.begin(), U.end(), final_host.begin());
    thrust::device_vector<double> final_dev = final_host;
    to_zero(1e-12, final_dev);
    final_host = final_dev;
    thrust::copy(final_host.begin(), final_host.end(), U.begin());

    LU retval;
    retval.U = U;
    retval.L = L;
    retval.n = n; // fix: the `n` member was left uninitialized
    return retval;
}
22,041
#include <thrust/host_vector.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <iostream>

// Works with either a thrust::host_vector or a thrust::device_vector (taken
// by value, so the caller's container is untouched): fills every element
// with 0.4 and returns the sum of the filled container.
template <typename T>
double funcao_que_recebe_device_ou_host(T v)
{
    const double fill_value = 0.4;
    thrust::fill(v.begin(), v.end(), fill_value);
    return thrust::reduce(v.begin(), v.end(), 0.0, thrust::plus<double>());
}

int main()
{
    thrust::host_vector<double> numbers(10);
    const double total = funcao_que_recebe_device_ou_host(numbers);
    std::cout << total << "\n";
}
22,042
// Kernel stub for velocity advection in a level-set based fluid solver,
// presumably — the body is not yet implemented (no-op). TODO confirm the
// intended algorithm (e.g. semi-Lagrangian advection) against the project
// this file belongs to.
__global__ void advectVelocities(const float dt, const float * d_levelset, const float * d_velIn_x, const float * d_velIn_y, float * d_velOut_x, float * d_velOut_y)
{
}

// Host-side launch wrapper: forwards the device pointers to the kernel above
// with the caller-supplied launch configuration. No error checking is done
// here — callers must use cudaGetLastError / cudaDeviceSynchronize themselves.
void advectVelocities(dim3 blocks, dim3 threads,const float dt, const float * d_levelset, const float * d_velIn_x, const float * d_velIn_y, float * d_velOut_x, float * d_velOut_y)
{
    advectVelocities<<<blocks, threads>>> (dt, d_levelset,d_velIn_x, d_velIn_y,d_velOut_x, d_velOut_y);
}
22,043
//
// [normalize_vector]
//
// Overview: sample of a vector "normalization" (in-place scaling) routine.
// Reference:
//   CUDA for Engineers: An Introduction to High-Performance Parallel Computing
//
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <cmath>
#include <iostream>
using namespace std;

// Scale every element of v in place by alpha, using a thrust placeholder
// expression (_1 *= alpha) so no explicit functor is needed.
// NOTE(review): despite the name, this multiplies by a caller-supplied
// factor; it does not compute the vector's norm itself — the caller is
// expected to pass 1/norm as alpha.
void normalize(thrust::device_vector<double> &v, double alpha){
    using namespace thrust::placeholders;
    thrust::transform(v.begin(), v.end(), v.begin(), _1 *= alpha);
}

int main(){
    thrust::device_vector<double> vec(2);
    vec[0] = 1.0;
    vec[1] = 2.0;

    // Scale the vector (here by 0.5).
    normalize(vec, 0.5);

    // Print the result for verification.
    for(int i = 0; i < vec.size(); i++){
        cout << "vec[" << i << "] = " << vec[i] << endl;
    }
}
22,044
#include<stdio.h> #include<cuda.h>
22,045
#include "includes.h"

// Rounds each element of out[] to the nearest integer value, in place, over a
// 2-D grid of 1-D blocks (id flattens blockIdx.y rows of gridDim.x blocks of
// blockDim.x threads).
//
// NOTE(review): the parameter A is never read — the kernel rounds out[id] in
// place. Either out[] is pre-loaded with the values to round (and A is
// vestigial), or the load should be A[id]; confirm against the call sites.
// NOTE(review): (int)(x + 0.5) rounds half-up correctly only for
// non-negative x; for negative values it is off by one compared to roundf().
// Confirm the expected input range before relying on this for signed data.
__global__ void Round(float * A, float *out, int size)
{
    int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
    if (id < size)
    {
        int t = (int)(out[id] + 0.5); // can it be speeded up??
        out[id] = (float)t;
    }
}
22,046
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
using namespace std;
#include <cuda_runtime.h>
#include <curand_kernel.h>

#define NUM_THREADS 1024

// Abort-on-error wrapper for CUDA runtime calls.
// fix: the original passed every call through cudaGetErrorString() and threw
// the resulting string away, so no failure was ever actually reported.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                 \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

cudaStream_t stream;
int features = 1024;
int sampels = 10000;
int classes = 10;

float ** training_x;   // 10000 x 1024 training samples
float ** training_y;   // 10000 x 1 labels (1-based class indices)
float ** testing_x;    // 2000 x 1024
float ** testing_y;    // 2000 x 1
float ** label_onehot; // 10000 x 10 one-hot encoded labels

// Parse one line of comma/space-separated floats into res.
void getData(float * res, char buff[])
{
    int i = 0;
    for (char *token = strtok(buff, " ,"); token != NULL; token = strtok(NULL, " ,")) {
        res[i++] = atof(token);
    }
}

// Read a CSV file into the row-pointer matrix `mat`, one matrix row per line.
// y_dim sizes the per-line read buffer; x_dim is unused (kept for the callers).
void readCSV(char* file, float** mat, int x_dim, int y_dim)
{
    FILE* fp = fopen(file, "r");
    if (fp == NULL) {
        perror("Error opening file");
        return;
    }
    int size_per_pic = y_dim * 30;             // generous bytes-per-line estimate
    char *line = (char*)malloc(size_per_pic);  // heap buffer instead of a VLA
    int i = 0;
    while (fgets(line, size_per_pic, fp)) {
        char* tmp = strdup(line);
        getData(mat[i], tmp);
        free(tmp); // fix: every strdup'd line was leaked
        i++;
    }
    free(line);
    fclose(fp); // fix: the FILE handle was leaked
}

// Allocate an r x c float matrix as an array of zero-initialized row pointers.
static float** alloc_matrix(int r, int c)
{
    float **m = (float**)malloc(sizeof(float*) * r);
    for (int i = 0; i < r; i++) {
        m[i] = (float*)calloc(c, sizeof(float));
    }
    return m;
}

// Allocate the host-side data matrices.
// fix: rows are now zero-initialized (calloc); the original malloc'd
// label_onehot and later read the never-written entries when flattening the
// one-hot matrix, producing garbage labels.
void malloc_host(void)
{
    training_x   = alloc_matrix(10000, 1024);
    training_y   = alloc_matrix(10000, 1);
    testing_x    = alloc_matrix(2000, 1024);
    testing_y    = alloc_matrix(2000, 1);
    label_onehot = alloc_matrix(10000, 10);
}

// result(M x S) = a(M x N) * b(N x S), row-major, one output element per thread.
__global__ void Mult_GPU(float *a, float *b, float *result, const int M, const int N, const int S)
{
    int threadId = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
                 + blockIdx.x * blockDim.x + threadIdx.x;
    if (threadId < M * S) {
        int row = threadId / S;
        int column = threadId % S;
        result[threadId] = 0;
        for (int i = 0; i < N; i++) {
            result[threadId] += a[row * N + i] * b[i * S + column];
        }
    }
}

// sum[tid] += sum_i exp(predict[tid][i]); sum must be zeroed beforehand
// (see the `initialize` kernel).
__global__ void softmax_sum(float *predict, float *sum, const int label_size, const int data_size)
{
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < data_size) {
        for (int i = 0; i < label_size; i++) {
            sum[tid] += exp(predict[tid * label_size + i]);
        }
    }
}

// max[tid] = max over i of predict[tid][i].
// fix: the original reset the running maximum to element 0 on every loop
// iteration, so it effectively returned max(p[0], p[label_size-1]).
__global__ void max(float *predict, float *max, const int label_size, const int data_size)
{
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < data_size) {
        float m = predict[tid * label_size];
        for (int i = 1; i < label_size; i++) {
            float v = predict[tid * label_size + i];
            if (v > m) {
                m = v;
            }
        }
        max[tid] = m;
    }
}

// Subtract the per-row maximum from each logit (numerical stabilization
// before exponentiation).
__global__ void normalize(float *predict, float *max, const int label_size, const int data_size)
{
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < data_size) {
        for (int i = 0; i < label_size; i++) {
            predict[tid * label_size + i] -= max[tid];
        }
    }
}

// softmax_value[tid][i] = exp(predict[tid][i]) / sum[tid].
__global__ void softmax(float *softmax_value, float *predict, float *sum, const int label_size, const int data_size)
{
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < data_size) {
        for (int i = 0; i < label_size; i++) {
            softmax_value[tid * label_size + i] = exp(predict[tid * label_size + i]) / sum[tid];
        }
    }
}

// dz = softmax - onehot (gradient of cross-entropy w.r.t. the logits).
__global__ void dz(float *softmax_value, float *label, float *dz, const int label_size, const int data_size)
{
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < data_size) {
        for (int i = 0; i < label_size; i++) {
            dz[tid * label_size + i] = softmax_value[tid * label_size + i] - label[tid * label_size + i];
        }
    }
}

// grad(weight_size x label_size) += train_data^T * dz.
// fix: the original accumulated from grad[] itself instead of dz[], leaving
// the dz argument unused and the computed gradient garbage.
__global__ void grad(float *train_data, float *dz, float *grad, const int label_size, const int data_size, const int weight_size)
{
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < weight_size) {
        for (int i = 0; i < label_size; i++) {
            for (int j = 0; j < data_size; j++) {
                grad[tid * label_size + i] += train_data[j * weight_size + tid] * dz[j * label_size + i];
            }
        }
    }
}

// W -= lr * grad / batch. NOTE: the batch size 10000 is hard-coded here and
// must match data_size used everywhere else.
__global__ void weight_update(float *weight, float *grad, const int label_size, const int weight_size, const float learning_rate)
{
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < weight_size) {
        for (int i = 0; i < label_size; i++) {
            grad[tid * label_size + i] /= 10000;
            weight[tid * label_size + i] -= (learning_rate * grad[tid * label_size + i]);
        }
    }
}

// Zero dz before the epoch.
__global__ void initialize_dz(float *dz, const int label_size, const int data_size)
{
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < data_size) {
        for (int i = 0; i < label_size; i++) {
            dz[tid * label_size + i] = 0;
        }
    }
}

// Zero the gradient accumulator before the epoch.
__global__ void initialize_grad(float *grad, const int label_size, const int weight_size)
{
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < weight_size) {
        for (int i = 0; i < label_size; i++) {
            grad[tid * label_size + i] = 0;
        }
    }
}

// Zero the per-sample exp-sum and the prediction matrix before the epoch.
__global__ void initialize(float *sum, float *predict, const int data_size, const int label_size)
{
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < data_size) {
        sum[tid] = 0;
        for (int i = 0; i < label_size; i++) {
            predict[tid * label_size + i] = 0;
        }
    }
}

// Full-batch softmax (multinomial logistic) regression trained for 10 epochs
// on the GPU, then dumps the learned weights.
int main()
{
    malloc_host();
    readCSV("training_x.csv", training_x, 10000, 1024);
    readCSV("training_y.csv", training_y, 1024, 1);
    readCSV("testing_x.csv", testing_x, 2000, 1024);
    readCSV("testing_y.csv", testing_y, 2000, 1);
    printf("label %f\n", training_y[9998][0]);

    float learning_rate = 0.01;
    int data_size = 10000;
    int label_size = 10;
    int weight_size = 1024;

    size_t train_data_bytes = 10000 * 1024 * sizeof(float);
    size_t weight_bytes = 1024 * 10 * sizeof(float);
    size_t predict_bytes = 10000 * 10 * sizeof(float);

    float *h_train_data   = (float *) malloc(train_data_bytes);
    float *h_train_data_T = (float *) malloc(train_data_bytes);
    float *h_label_onehot = (float *) malloc(predict_bytes);
    float *h_weight       = (float *) malloc(weight_bytes);
    float *h_predict      = (float *) malloc(predict_bytes);
    float *h_max          = (float *) malloc(10000 * sizeof(float));
    float *h_softmax      = (float *) malloc(predict_bytes);

    ////////////////////// host-side initialization //////////////////////

    // Transposed copy of the training data (weight_size x data_size).
    for (int i = 0; i < data_size; i++) {
        for (int j = 0; j < weight_size; j++) {
            h_train_data_T[j * 10000 + i] = training_x[i][j];
        }
    }

    // One-hot encode the 1-based labels (rows are already zeroed by calloc).
    for (int i = 0; i < data_size; i++) {
        label_onehot[i][(int(training_y[i][0] - 1))] = 1;
        if (i == 1) {
            printf("training_y : %f\n", training_y[1][0]);
            for (int j = 0; j < 10; j++)
                printf("onehot : %f\n", label_onehot[i][j]);
        }
    }
    for (int i = 0; i < data_size; i++) {
        for (int j = 0; j < label_size; j++) {
            h_label_onehot[i * label_size + j] = label_onehot[i][j];
        }
    }
    for (int i = 0; i < data_size; i++) {
        for (int j = 0; j < weight_size; j++) {
            h_train_data[i * weight_size + j] = training_x[i][j];
        }
    }
    // All weights start at 1.
    for (int i = 0; i < weight_size; i++) {
        for (int j = 0; j < label_size; j++) {
            h_weight[i * label_size + j] = 1;
        }
    }

    ////////////////////// device-side setup //////////////////////
    float *d_train_data, *d_train_data_T, *d_label, *d_weight, *d_predict,
          *d_sum, *d_max, *d_softmax_value, *d_dz, *d_grad;
    CUDA_CHECK(cudaMalloc((void **) &d_train_data, train_data_bytes));
    CUDA_CHECK(cudaMalloc((void **) &d_train_data_T, train_data_bytes));
    CUDA_CHECK(cudaMalloc((void **) &d_label, predict_bytes));
    CUDA_CHECK(cudaMalloc((void **) &d_predict, predict_bytes));
    CUDA_CHECK(cudaMalloc((void **) &d_weight, weight_bytes));
    CUDA_CHECK(cudaMalloc((void **) &d_sum, 10000 * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void **) &d_softmax_value, predict_bytes));
    CUDA_CHECK(cudaMalloc((void **) &d_dz, predict_bytes));
    CUDA_CHECK(cudaMalloc((void **) &d_grad, weight_bytes));
    CUDA_CHECK(cudaMalloc((void **) &d_max, 10000 * sizeof(float)));

    // Pick a block shape so that neither grid dimension exceeds 65535.
    int bdx = 32, bdy = 32;
    while (data_size > bdx * 65535) { bdx = bdx * 2; bdy = bdy / 2; }
    while (weight_size > bdy * 65535) { bdy = bdy * 2; bdx = bdx / 2; }
    dim3 blockDim(bdx, bdy);
    dim3 gridDim((int)((data_size + blockDim.x - 1) / blockDim.x),
                 (int)((weight_size + blockDim.y - 1) / blockDim.y));

    CUDA_CHECK(cudaMemcpy(d_train_data_T, h_train_data_T, train_data_bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_train_data, h_train_data, train_data_bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_weight, h_weight, weight_bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_label, h_label_onehot, predict_bytes, cudaMemcpyHostToDevice));

    ////////////////////// training loop //////////////////////
    for (int train = 0; train < 10; train++) {
        // Zero the per-epoch accumulators.
        initialize<<<gridDim, blockDim>>>(d_sum, d_predict, data_size, label_size);
        CUDA_CHECK(cudaGetLastError());
        initialize_dz<<<gridDim, blockDim>>>(d_dz, label_size, data_size);
        CUDA_CHECK(cudaGetLastError());
        initialize_grad<<<gridDim, blockDim>>>(d_grad, label_size, weight_size);
        CUDA_CHECK(cudaGetLastError());

        // predict = X * W
        Mult_GPU<<<gridDim, blockDim>>>(d_train_data, d_weight, d_predict, data_size, weight_size, label_size);
        CUDA_CHECK(cudaGetLastError());

        // Numerically stable softmax: subtract the per-row max first.
        max<<<gridDim, blockDim>>>(d_predict, d_max, label_size, data_size);
        CUDA_CHECK(cudaGetLastError());
        normalize<<<gridDim, blockDim>>>(d_predict, d_max, label_size, data_size);
        CUDA_CHECK(cudaGetLastError());
        softmax_sum<<<gridDim, blockDim>>>(d_predict, d_sum, label_size, data_size);
        CUDA_CHECK(cudaGetLastError());
        softmax<<<gridDim, blockDim>>>(d_softmax_value, d_predict, d_sum, label_size, data_size);
        CUDA_CHECK(cudaGetLastError());

        // dz = softmax - onehot;  grad = X^T * dz;  W -= lr * grad / batch
        dz<<<gridDim, blockDim>>>(d_softmax_value, d_label, d_dz, label_size, data_size);
        CUDA_CHECK(cudaGetLastError());
        Mult_GPU<<<gridDim, blockDim>>>(d_train_data_T, d_dz, d_grad, weight_size, data_size, label_size);
        CUDA_CHECK(cudaGetLastError());
        weight_update<<<gridDim, blockDim>>>(d_weight, d_grad, label_size, weight_size, learning_rate);
        CUDA_CHECK(cudaGetLastError());

        // One sync per epoch suffices: default-stream kernels run in order.
        // (fix: the original synchronized after every launch and ignored the result.)
        CUDA_CHECK(cudaDeviceSynchronize());
    }

    ////////////////////// copy results back //////////////////////
    CUDA_CHECK(cudaMemcpy(h_predict, d_predict, predict_bytes, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(h_softmax, d_softmax_value, predict_bytes, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(h_weight, d_weight, weight_bytes, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(h_max, d_max, 10000 * sizeof(float), cudaMemcpyDeviceToHost));

    // Dump the learned weights (the "dz:" label is kept from the original).
    for (int i = 0; i < weight_size * label_size; i++) {
        printf("dz: %f\n", h_weight[i]);
    }

    ////////////////////// cleanup (the original leaked everything) //////////////////////
    cudaFree(d_train_data);
    cudaFree(d_train_data_T);
    cudaFree(d_label);
    cudaFree(d_predict);
    cudaFree(d_weight);
    cudaFree(d_sum);
    cudaFree(d_softmax_value);
    cudaFree(d_dz);
    cudaFree(d_grad);
    cudaFree(d_max);
    free(h_train_data);
    free(h_train_data_T);
    free(h_label_onehot);
    free(h_weight);
    free(h_predict);
    free(h_max);
    free(h_softmax);
    return 0;
}
22,047
#include <stdio.h>

// Fill the first N entries of a with the constant num.
void initWith(float num, float *a, int N)
{
    for (int idx = 0; idx < N; ++idx)
        a[idx] = num;
}

// Grid-stride element-wise add: result = a + b, valid for any grid size.
__global__ void addVectorsInto(float *result, float *a, float *b, int N)
{
    int first = threadIdx.x + blockIdx.x * blockDim.x;
    int step = blockDim.x * gridDim.x;
    for (int idx = first; idx < N; idx += step)
        result[idx] = a[idx] + b[idx];
}

// Verify every element equals target; exit with a message on the first mismatch.
void checkElementsAre(float target, float *vector, int N)
{
    for (int idx = 0; idx < N; idx++)
    {
        if (vector[idx] != target)
        {
            printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", idx, vector[idx], target);
            exit(1);
        }
    }
    printf("Success! All values calculated correctly.\n");
}

int main()
{
    int deviceId;
    int numberOfSMs;

    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
    printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs);

    const int N = 2<<24;
    size_t size = N * sizeof(float);

    float *a;
    float *b;
    float *c;
    cudaMallocManaged(&a, size);
    cudaMallocManaged(&b, size);
    cudaMallocManaged(&c, size);

    initWith(3, a, N);
    initWith(4, b, N);
    initWith(0, c, N);

    // Asynchronously prefetch the initialized buffers to the GPU before the
    // launch so the kernel does not page-fault them in one page at a time.
    cudaMemPrefetchAsync(a, size, deviceId);
    cudaMemPrefetchAsync(b, size, deviceId);
    cudaMemPrefetchAsync(c, size, deviceId);

    size_t threadsPerBlock = 256;
    size_t numberOfBlocks = 32 * numberOfSMs;

    addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);

    // Launch-configuration errors surface immediately...
    cudaError_t addVectorsErr = cudaGetLastError();
    if (addVectorsErr != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(addVectorsErr));

    // ...while execution errors surface at the synchronization point.
    cudaError_t asyncErr = cudaDeviceSynchronize();
    if (asyncErr != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(asyncErr));

    checkElementsAre(7, c, N);

    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
}
22,048
/*
 * Copyright 2011-2015 NVIDIA Corporation. All rights reserved
 *
 * Sample app to demonstrate use of CUPTI library to obtain metric values
 * using callbacks for CUDA runtime APIs
 *
 */
#include <stdio.h>
#include <stdlib.h> // fix: malloc/free/exit were used without a declaring header
#include <string.h> // fix: memset was used without a declaring header
#include <cuda.h>

#define DRIVER_API_CALL(apiFuncCall)                                           \
    do {                                                                       \
        CUresult _status = apiFuncCall;                                        \
        if (_status != CUDA_SUCCESS) {                                         \
            fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n",\
                    __FILE__, __LINE__, #apiFuncCall, _status);                \
            exit(-1);                                                          \
        }                                                                      \
    } while (0)

#define RUNTIME_API_CALL(apiFuncCall)                                          \
    do {                                                                       \
        cudaError_t _status = apiFuncCall;                                     \
        if (_status != cudaSuccess) {                                          \
            fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n",\
                    __FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\
            exit(-1);                                                          \
        }                                                                      \
    } while (0)

#define TILE_DIM 64
#define BLOCK_ROWS 8
#define NUM_REPS 100

// Device code: element-wise C = A + B.
// The shared-memory tile store serves no computational purpose — presumably
// it exists so the collected CUPTI metrics include shared-memory traffic;
// confirm against the sample's intent. Requires blockDim.x <= TILE_DIM.
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
    __shared__ float tile[TILE_DIM];
    tile[threadIdx.x] = A[threadIdx.x];
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}

// Fill vec[0..n-1] with 0, 1, 2, ...
static void initVec(float *vec, int n)
{
    for (int i = 0; i < n; i++)
        vec[i] = i;
}

// Release device and host buffers; NULL pointers are skipped.
static void cleanUp(float *h_A, float *h_B, float *h_C, float *d_A, float *d_B, float *d_C)
{
    if (d_A)
        cudaFree(d_A);
    if (d_B)
        cudaFree(d_B);
    if (d_C)
        cudaFree(d_C);

    // Free host memory
    if (h_A)
        free(h_A);
    if (h_B)
        free(h_B);
    if (h_C)
        free(h_C);
}

// Allocate, initialize, launch VecAdd once, copy the result back, clean up.
static void runPass()
{
    int N = 10240 * 10240;
    size_t size = N * sizeof(float);
    int threadsPerBlock = 0;
    int blocksPerGrid = 0;
    float *h_A, *h_B, *h_C;
    float *d_A, *d_B, *d_C;

    // Allocate input vectors h_A and h_B in host memory (~400 MB each).
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL) { // fix: mallocs were unchecked
        fprintf(stderr, "host allocation of %zu bytes failed\n", size);
        cleanUp(h_A, h_B, h_C, NULL, NULL, NULL);
        exit(-1);
    }

    // Initialize input vectors
    initVec(h_A, N);
    initVec(h_B, N);
    memset(h_C, 0, size);

    // Allocate vectors in device memory.
    // fix: every runtime call's status was discarded even though the
    // RUNTIME_API_CALL macro existed for exactly this purpose.
    RUNTIME_API_CALL(cudaMalloc((void**)&d_A, size));
    RUNTIME_API_CALL(cudaMalloc((void**)&d_B, size));
    RUNTIME_API_CALL(cudaMalloc((void**)&d_C, size));

    // Copy vectors from host memory to device memory
    RUNTIME_API_CALL(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
    RUNTIME_API_CALL(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));

    // NOTE(review): these memsets zero the inputs that were just copied, so
    // the kernel adds zeros and h_C comes back all zero. Kept as-is since
    // this sample only exercises the API surface for profiling; confirm intent.
    RUNTIME_API_CALL(cudaMemset(d_A, 0, size));
    RUNTIME_API_CALL(cudaMemset(d_B, 0, size));

    // Invoke kernel
    threadsPerBlock = 64;
    blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    printf("Launching kernel: blocks %d, thread/block %d\n",
           blocksPerGrid, threadsPerBlock);

    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    RUNTIME_API_CALL(cudaGetLastError()); // fix: launch errors were never surfaced

    // Copy result from device memory to host memory
    // h_C contains the result in host memory (this blocking copy also
    // synchronizes with the kernel).
    RUNTIME_API_CALL(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost));

    cleanUp(h_A, h_B, h_C, d_A, d_B, d_C);
}

int main(int argc, char *argv[])
{
    runPass();
    return 0;
}
22,049
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <time.h>
#include <cuda.h>

#define GIG 1000000000
#define CPG 2.4 // Cycles per GHz -- Adjust to your computer
#define GPU_BLOCK_SIZE 128

typedef float pr_type_t;

/* One outgoing edge in a vertex's host-side adjacency list. */
typedef struct ad_vert {
    long vertex_num;
    struct ad_vert *next;
} adj_vert_t;

/* Host vertex used while the graph is read in as linked adjacency lists. */
typedef struct {
    pr_type_t curr_page_rank;
    pr_type_t next_page_rank;
    long num_adj_nodes;
    adj_vert_t *last_node_addr; /* list tail, for O(1) append */
    void *next;                 /* list head (adj_vert_t*) */
} vertex_t;

/* CSR-style compact vertex: start offset into edge_array plus leaf flag. */
typedef struct {
    long edge_index;
    char is_leaf;
} compact_adj_node_t;

/* Per-vertex PageRank pair: value being accumulated (next) and current. */
typedef struct {
    pr_type_t next;
    pr_type_t curr;
} p_rank_struct_t;

pr_type_t epsilon;
pr_type_t rand_hop = 0.15;
__device__ pr_type_t d_rand_hop = 0.15;

#define GRAPH_FILE_SEPERATOR " ,;"
#define MAX_LINE_LEN 100
#define RAND_HOP_LIKELIHOOD(r_hop_prob, nvert) ((r_hop_prob) / (nvert))
#define TRAV_LIKELIHOOD(r_hop_prob, p, index, num_adj_nodes) ((1 - (r_hop_prob)) * (p)[index].curr / num_adj_nodes)
#define TRAV_LIKELIHOOD_LEAF(r_hop_prob, p, index, num_vertices) ((1 - (r_hop_prob)) * (p)[index].curr / (num_vertices - 1))

/* Return end - start as a timespec, borrowing from seconds when needed. */
struct timespec diff(struct timespec start, struct timespec end)
{
    struct timespec temp;
    if ((end.tv_nsec - start.tv_nsec) < 0) {
        temp.tv_sec = end.tv_sec - start.tv_sec - 1;
        temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
    } else {
        temp.tv_sec = end.tv_sec - start.tv_sec;
        temp.tv_nsec = end.tv_nsec - start.tv_nsec;
    }
    return temp;
}

/* strtol wrapper: returns the value, or -1 (with perror) on any error. */
long string_to_long(char *str)
{
    long val;
    char *endptr;

    errno = 0;
    val = strtol(str, &endptr, 10);
    if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN)) ||
        (errno != 0 && val == 0) || (endptr == str)) {
        perror("Error while converting string to long value");
        val = -1;
    }
    return val;
}

/* Give every vertex the uniform initial rank and an empty adjacency list. */
void initialize_vertices(vertex_t *g, long num_vertices)
{
    long i;
    for (i = 0; i < num_vertices; i++) {
        g[i].curr_page_rank = 1 / (pr_type_t)num_vertices;
        g[i].next_page_rank = RAND_HOP_LIKELIHOOD(rand_hop, num_vertices);
        g[i].num_adj_nodes = 0;
        g[i].last_node_addr = NULL;
        g[i].next = NULL;
    }
}

/*
 * Append edge parent -> child to the adjacency list of parent.
 * Returns 0 on success, -1 on out-of-range vertex numbers.
 */
int append_node(vertex_t *g, long parent_vertex, long child_vertex,
                long num_verts, long *num_edges)
{
    if (parent_vertex >= num_verts || child_vertex >= num_verts) {
        printf("Invalid arguments\n");
        return -1;
    }

    adj_vert_t *ptr = (adj_vert_t *)malloc(sizeof(adj_vert_t));
    if (!ptr) {
        perror("malloc");
        return -1;
    }
    ptr->vertex_num = child_vertex;
    ptr->next = NULL;

    if (g[parent_vertex].next == NULL) {
        g[parent_vertex].next = ptr;
        g[parent_vertex].last_node_addr = ptr;
    } else {
        g[parent_vertex].last_node_addr->next = ptr;
        g[parent_vertex].last_node_addr = ptr;
    }
    g[parent_vertex].num_adj_nodes++;
    (*num_edges)++;
    return 0;
}

/*
 * Serial reference: push rank mass from frontier vertex i to its neighbors.
 * Leaf vertices distribute their mass over all other vertices.
 */
void calc_bfs_pg_rank_serial(compact_adj_node_t *vertex_array, long *edge_array,
                             char *frontier_array, char *visited_array,
                             p_rank_struct_t *p_rank_array, long num_vertices,
                             long num_edges, long *num_front, long i)
{
    long j, term_ind;
    pr_type_t p_rank_val;

    if (frontier_array[i]) {
        frontier_array[i] = 0;
        (*num_front)--;
        visited_array[i] = 1;
        term_ind = (i == num_vertices - 1) ? num_edges : vertex_array[i + 1].edge_index;
        p_rank_val = TRAV_LIKELIHOOD(rand_hop, p_rank_array, i,
                                     (term_ind - vertex_array[i].edge_index));
        if (!vertex_array[i].is_leaf) {
            for (j = vertex_array[i].edge_index; j < term_ind; j++) {
                p_rank_array[edge_array[j]].next += p_rank_val;
                if (!visited_array[edge_array[j]]) {
                    if (!frontier_array[edge_array[j]]) {
                        (*num_front)++;
                        frontier_array[edge_array[j]] = 1;
                    }
                }
            }
        } else {
            /* Fixed: the original loop condition "j < num_vertices && j != i"
             * stopped at the first j == i, so vertices after i never received
             * leaf mass. Skip i instead, matching calc_bfs_pg_rank_cuda. */
            for (j = 0; j < num_vertices; j++) {
                if (j != i)
                    p_rank_array[j].next +=
                        TRAV_LIKELIHOOD_LEAF(rand_hop, p_rank_array, i, num_vertices);
            }
        }
    }
}

/*
 * One PageRank push iteration on the GPU: thread i scatters vertex i's
 * current rank to its out-neighbors (or to everyone else if i is a leaf).
 */
__global__ void calc_bfs_pg_rank_cuda(compact_adj_node_t *vertex_array,
                                      long *edge_array,
                                      p_rank_struct_t *p_rank_array,
                                      long num_vertices, long num_edges)
{
    long i, j, term_ind;
    pr_type_t p_rank_val;

    i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_vertices) {
        term_ind = (i == num_vertices - 1) ? num_edges : vertex_array[i + 1].edge_index;
        if (!vertex_array[i].is_leaf) {
            p_rank_val = TRAV_LIKELIHOOD(d_rand_hop, p_rank_array, i,
                                         (term_ind - vertex_array[i].edge_index));
            for (j = vertex_array[i].edge_index; j < term_ind; j++)
                atomicAdd(&p_rank_array[edge_array[j]].next, p_rank_val);
        } else {
            p_rank_val = TRAV_LIKELIHOOD_LEAF(d_rand_hop, p_rank_array, i, num_vertices);
            for (j = 0; j < num_vertices; j++) {
                if (j != i)
                    atomicAdd(&p_rank_array[j].next, p_rank_val);
            }
        }
    }
}

/*
 * Fold next into curr, reset next to the random-hop floor, and accumulate
 * the total |next - curr| into *pr_diff_max for the convergence test.
 */
__global__ void update_pr(p_rank_struct_t *p_rank_array,
                          pr_type_t *pr_diff_max, long num_vertices)
{
    long i;
    pr_type_t curr_diff;

    i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_vertices) {
        curr_diff = fabsf(p_rank_array[i].next - p_rank_array[i].curr);
        atomicAdd(pr_diff_max, curr_diff);
        p_rank_array[i].curr = p_rank_array[i].next;
        p_rank_array[i].next = RAND_HOP_LIKELIHOOD(d_rand_hop, num_vertices);
    }
}

/* Print each vertex's converged rank and the total (should be ~1). */
void print_converged_pr_vals(p_rank_struct_t *p_rank, long num_vertices)
{
    long i;
    pr_type_t sum = 0;
    for (i = 0; i < num_vertices; i++) {
        /* Fixed: i is signed long, so %ld (was %lu). */
        printf("Converged page rank for node %ld : %.10f\n", i, p_rank[i].curr);
        sum += p_rank[i].curr;
    }
    printf("Sum is %f\n", sum);
}

int main(int argc, char *argv[])
{
    cudaEvent_t start, stop;
    float elapsed_gpu;
    long i, j;
    FILE *file;
    char *token1, *token2;
    char line[MAX_LINE_LEN];
    pr_type_t *d_pr_diff;
    pr_type_t pr_diff;
    long num_vertices = 0;
    long pnode, cnode;
    vertex_t *graph;
    compact_adj_node_t *vertex_array, *d_vertex_array;
    long *edge_array, *d_edge_array;
    p_rank_struct_t *p_rank_array, *d_p_rank_array;
    long num_edges = 0;
    adj_vert_t *avert;

    /* Usage: <num_vertices> <edge list file>. */
    if (argc != 3)
        return -1;

    num_vertices = string_to_long(argv[1]);
    if (num_vertices < 0)
        return -1;

    graph = (vertex_t *)malloc(num_vertices * sizeof(vertex_t));
    epsilon = (pr_type_t)0.00001;
    if (!graph)
        return -1;
    initialize_vertices(graph, num_vertices);

    /* Parse the edge list: one "parent child" pair per line. */
    file = fopen(argv[2], "r");
    if (file) {
        while (fgets(line, sizeof(line), file)) {
            token1 = strtok(line, GRAPH_FILE_SEPERATOR);
            token2 = strtok(NULL, GRAPH_FILE_SEPERATOR);
            if (token1 == NULL || token2 == NULL ||
                strtok(NULL, GRAPH_FILE_SEPERATOR) != NULL)
                return -1;
            pnode = string_to_long(token1);
            cnode = string_to_long(token2);
            if (pnode < 0 || cnode < 0)
                return -1;
            if (append_node(graph, pnode, cnode, num_vertices, &num_edges))
                return -1;
        }
        fclose(file); /* fixed: the file was never closed */
    } else
        return -1;

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Record event on the default stream
    cudaEventRecord(start, 0);

    // Compact adjacency list (CSR): vertex offsets + flat edge array.
    vertex_array = (compact_adj_node_t *)calloc(num_vertices, sizeof(compact_adj_node_t));
    edge_array = (long *)calloc(num_edges, sizeof(long));
    p_rank_array = (p_rank_struct_t *)malloc(num_vertices * sizeof(p_rank_struct_t));

    if (cudaMalloc(&d_vertex_array, num_vertices * sizeof(compact_adj_node_t)) != cudaSuccess) {
        printf("Error in cudaMalloc\n");
        return -2;
    }
    if (cudaMalloc(&d_edge_array, num_edges * sizeof(long)) != cudaSuccess) {
        printf("Error in cudaMalloc\n");
        return -2;
    }
    if (cudaMalloc(&d_p_rank_array, num_vertices * sizeof(p_rank_struct_t)) != cudaSuccess) {
        printf("Error in cudaMalloc\n");
        return -2;
    }

    for (i = 0, j = 0; i < num_vertices; i++) {
        // Initialize Page Rank values
        p_rank_array[i].next = RAND_HOP_LIKELIHOOD(rand_hop, num_vertices);
        p_rank_array[i].curr = 1 / (pr_type_t)num_vertices;

        vertex_array[i].edge_index = j;
        for (avert = (adj_vert_t *)graph[i].next; avert != NULL; avert = avert->next)
            edge_array[j++] = avert->vertex_num;
        /* j unchanged means vertex i has no out-edges -> leaf. */
        if (vertex_array[i].edge_index - j == 0)
            vertex_array[i].is_leaf = 1;
    }

    if (cudaMemcpy(d_vertex_array, vertex_array,
                   num_vertices * sizeof(compact_adj_node_t),
                   cudaMemcpyHostToDevice) != cudaSuccess) {
        printf("Error in cudaMemcpy\n");
        return -2;
    }
    if (cudaMemcpy(d_edge_array, edge_array, num_edges * sizeof(long),
                   cudaMemcpyHostToDevice) != cudaSuccess) {
        printf("Error in cudaMemcpy\n");
        return -2;
    }
    if (cudaMemcpy(d_p_rank_array, p_rank_array,
                   num_vertices * sizeof(p_rank_struct_t),
                   cudaMemcpyHostToDevice) != cudaSuccess) {
        printf("Error in cudaMemcpy\n");
        return -2;
    }
    printf("Graph parsing successful\n");

    /* Pinned host memory; the kernels access it directly (zero-copy via
     * UVA) -- presumably relies on a UVA-capable device, verify. */
    if (cudaMallocHost(&d_pr_diff, sizeof(pr_type_t)) != cudaSuccess) {
        printf("Error in cudaMalloc\n");
        return -2;
    }

    /* Iterate push + update until the total rank change drops below epsilon. */
    do {
        pr_diff = 0;
        if (cudaMemcpy(d_pr_diff, &pr_diff, sizeof(pr_type_t),
                       cudaMemcpyHostToDevice) != cudaSuccess) {
            printf("Error in cudaMemcpy\n");
            return -2;
        }
        calc_bfs_pg_rank_cuda<<<(num_vertices / GPU_BLOCK_SIZE) + 1, GPU_BLOCK_SIZE>>>(
            d_vertex_array, d_edge_array, d_p_rank_array, num_vertices, num_edges);
        cudaDeviceSynchronize();
        update_pr<<<(num_vertices / GPU_BLOCK_SIZE) + 1, GPU_BLOCK_SIZE>>>(
            d_p_rank_array, d_pr_diff, num_vertices);
        cudaDeviceSynchronize();
        if (cudaMemcpy(&pr_diff, d_pr_diff, sizeof(pr_type_t),
                       cudaMemcpyDeviceToHost) != cudaSuccess) {
            printf("Error in cudaMemcpy\n");
            return -2;
        }
    } while (pr_diff > epsilon);

    if (cudaMemcpy(p_rank_array, d_p_rank_array,
                   num_vertices * sizeof(p_rank_struct_t),
                   cudaMemcpyDeviceToHost) != cudaSuccess) {
        printf("Error in cudaMemcpy\n");
        return -2;
    }

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_gpu, start, stop);
    printf("\nGPU time: %f (msec)\n", elapsed_gpu);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    //print_converged_pr_vals(p_rank_array, num_vertices);

    /* Fixed: the original freed the host arrays with free() and then passed
     * the SAME host pointers to cudaFree()/cudaFreeHost(), leaking all three
     * device buffers and the pinned d_pr_diff. Free the device pointers. */
    free(vertex_array);
    free(edge_array);
    free(p_rank_array);
    cudaFree(d_vertex_array);
    cudaFree(d_edge_array);
    cudaFree(d_p_rank_array);
    cudaFreeHost(d_pr_diff);
    return 0;
}
22,050
// incrementArray.cu #include <stdio.h> #include <assert.h> #include <cuda.h> void incrementArrayOnHost(float *a, int N) { int i; for (i=0; i < N; i++) a[i] = a[i]+1.f; } __global__ void incrementArrayOnDevice(float *a, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx<N) a[idx] = a[idx]+1.f; } __global__ void idOnDevice(float *a, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; //what is a block(blockIdx)? Is thread inside of a block? What does the blockDim tell us? printf("blockIdx %d, blockDim %d, ThreadIdx %d\n",blockIdx.x,blockDim.x,threadIdx.x); if (idx<N) a[idx] = threadIdx.x; } int main(void) { float *a_h, *b_h; // pointers to host memory float *a_d; // pointer to device memory int i, N = 10; size_t size = N*sizeof(float); // allocate arrays on host a_h = (float *)malloc(size); b_h = (float *)malloc(size); // allocate array on device cudaMalloc((void **) &a_d, size); // initialization of host data for (i=0; i<N; i++) a_h[i] = (float)i; // copy data from host to device cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice); // do calculation on host incrementArrayOnHost(a_h, N); // do calculation on device: // Part 1 of 2. Compute execution configuration int blockSize = 4; int nBlocks = N/blockSize + (N%blockSize == 0?0:1); fprintf(stderr,"nBlocks %d, blocksize %d\n",nBlocks,blockSize); // Part 2 of 2. Call incrementArrayOnDevice kernel //incrementArrayOnDevice <<< nBlocks, blockSize >>> (a_d, N); idOnDevice <<< nBlocks, blockSize >>> (a_d, N); // Retrieve result from device and store in b_h cudaMemcpy(b_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost); // check results fprintf(stderr,"Checking Results\n"); for (i=0; i<N; i++) { if(a_h[i] != b_h[i]) { fprintf(stderr,"a_h[%d]=%f, b_h[%d]=%f\n",i,a_h[i],i,b_h[i]); } //assert(a_h[i] == b_h[i]); } // cleanup free(a_h); free(b_h); cudaFree(a_d); }
22,051
#include <iostream>
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <memory>
#include <fstream>
#include <queue>

// Undirected graph stored as a dense adjacency matrix; -1 marks "no edge",
// any other value is a random edge weight. Provides CPU BFS (looped and
// recursive) to compare against the CUDA attempt below.
class Graph{
public:
    Graph(const int verticeNum){
        this->verticeNum = verticeNum;
        graphMatrix = new int*[verticeNum];
        for(int i = 0; i < verticeNum; i++){
            graphMatrix[i] = new int [verticeNum];
        }
        // NOTE(review): matrix left uninitialized here (the file-based
        // constructor fills it with -1; this one does not).
    }
    // Load a graph from a file: first number is the vertex count, second is
    // skipped, then "vA vB" edge pairs until EOF.
    Graph(std::string fileName){
        std::ifstream file;
        file.open(fileName);
        if(!file){
            printf("Unable to open file!\n");
            exit(1);
        }
        file >> verticeNum; //find vertice number
        //skip the second line
        int tmp;
        file >> tmp;
        //init matrix...
        graphMatrix = new int*[verticeNum];
        for(int i = 0; i < verticeNum; i++){
            graphMatrix[i] = new int [verticeNum];
        }
        for(int i = 0 ; i < verticeNum; i++){
            for(int j =0 ; j < verticeNum; j++){
                graphMatrix[i][j] = -1;
            }
        }
        int vA, vB;
        // NOTE(review): eof()-controlled loop processes the last record
        // twice on a trailing newline; no bounds check on vA/vB either.
        while (!file.eof()) {
            file >> vA >> vB;
            int linkWeight = generateRandomNum(100);
            graphMatrix[vA][vB] = linkWeight;
            graphMatrix[vB][vA] = linkWeight;
        }
        file.close();
    }
    void printGraphMatrix(){
        for(int i = 0 ; i < verticeNum; i++){
            for(int j =0 ; j < verticeNum; j++){
                printf("%d\t", graphMatrix[i][j]);
            }
            printf("\n");
        }
    }
    // Standard iterative BFS over every component (reuses loop variable i as
    // the dequeued vertex inside the inner loop -- legal but easy to misread).
    void BFS(){
        const int vNum = verticeNum;
        bool visited[vNum];  // NOTE(review): VLA, non-standard C++
        for(int i = 0; i < vNum; i++){
            visited[i] = false;
        }
        std::queue<int> bfsQueue;
        for(int i = 0; i < verticeNum; i++){
            //if not visited, we do processing..
            if(!visited[i]){
                //mark the node as visited
                visited[i] = true;
                //printf("visit node %d\n", i);
                bfsQueue.push(i);
                while(!bfsQueue.empty()){
                    i = bfsQueue.front();
                    bfsQueue.pop();
                    for(int j = 0; j < verticeNum; j++){
                        if(graphMatrix[i][j] != -1 && !visited[j]){
                            visited[j] = true;
                            //printf("inner visit node %d\n", j);
                            bfsQueue.push(j);
                        }
                    }
                }
            }
        }
    }
    // Recursive worker: expand the queue front, then recurse on the rest.
    // NOTE(review): recursion depth grows with queue length; deep graphs can
    // overflow the stack.
    void _recursiveBFS(std::queue<int>* bfsQueue, bool visited[]){
        while(!bfsQueue->empty()){
            int i = bfsQueue->front();
            bfsQueue->pop();
            for(int j = 0; j < verticeNum; j++){
                if(graphMatrix[i][j] != -1 && !visited[j]){
                    visited[j] = true;
                    //printf("inner visit node %d\n", j);
                    bfsQueue->push(j);
                }
            }
            _recursiveBFS(bfsQueue, visited);
        }
    }
    void recursiveBFS(){
        const int vNum = verticeNum;
        bool visited[vNum];
        for(int i = 0; i < vNum; i++){
            visited[i] = false;
        }
        std::queue<int> bfsQueue;
        for(int i = 0; i < verticeNum; i++){
            if(!visited[i]){
                //printf("visit node %d\n", i);
                bfsQueue.push(i);
                visited[i] = true;
                _recursiveBFS(&bfsQueue, visited);
            }
        }
    }
    int** graphMatrix;
    int verticeNum;
private:
    void generateRandomMatrix(int verticeNum){
        for(int i = 0 ; i < verticeNum; i++){
            for(int j =0 ; j < verticeNum; j++){
                graphMatrix[i][j] = generateRandomNum(100);
            }
        }
    }
    // NOTE(review): parameter max is ignored; always returns rand() % 100.
    int generateRandomNum(int max){
        return rand() % 100;
    }
};

// Doubly-linked-list node.
// NOTE(review): the trailing "Node" also defines a GLOBAL VARIABLE named
// Node of this struct type -- almost certainly unintended.
struct Node{
    int data;
    Node* next;
    Node* prev;
    Node(int data){
        this->data = data;
    }
    Node(){}
}Node;

// FIFO queue built on a doubly linked list with sentinel head/tail.
// NOTE(review): head is malloc'd as an array of vNum Nodes, pushed nodes are
// allocated with new, pop() deletes them, and ~List delete[]s the malloc'd
// block -- three incompatible allocators on the same structure (UB).
struct List{
    struct Node* head, *tail;
    int listSize;
    List(int vNum){
        head = (struct Node*)malloc(sizeof(Node) * vNum);
        tail = head + 1;  // sentinel pair carved out of the malloc'd block
        head->next = tail;
        tail->prev = head;
        listSize = 0;
    }
    ~List(){
        delete []head;  // NOTE(review): delete[] of malloc'd memory -- UB
    }
    // Append a value at the tail.
    void push(int data){
        struct Node* node = new struct Node(data);
        if(head->next == tail){
            head->next = node;
            tail->prev = node;
            node->next = tail;
            node->prev = head;
        }else{
            node->prev = tail->prev;
            node->next = tail;
            tail->prev->next = node;
            tail->prev = node;
        }
        listSize++;
    }
    // Remove and return the value at the head; asserts on empty.
    int pop(){
        assert(!isEmpty());
        struct Node * tmp = head->next;
        tmp->prev->next = tmp->next;
        tmp->next->prev = tmp->prev;
        int n = tmp->data;
        delete tmp;
        listSize--;
        return n;
    }
    // Device-side push; uses device-heap malloc instead of new.
    __device__ void cudaPush(int data){
        struct Node* node = (struct Node*)malloc(sizeof(struct Node));
        node->data = data;
        if(head->next == tail){
            head->next = node;
            tail->prev = node;
            node->next = tail;
            node->prev = head;
        }else{
            node->prev = tail->prev;
            node->next = tail;
            tail->prev->next = node;
            tail->prev = node;
        }
        listSize++;  // NOTE(review): not atomic -- races if >1 thread pushes
    }
    __device__ int cudaPop(){
        assert(!isEmpty());  // NOTE(review): calls the HOST isEmpty from
                             // device code -- works only because it is
                             // implicitly inlined here; fragile
        struct Node * tmp = head->next;
        tmp->prev->next = tmp->next;
        tmp->next->prev = tmp->prev;
        int n = tmp->data;
        delete tmp;
        listSize--;
        return n;
    }
    __device__ int cudaGetSize(){
        return listSize;
    }
    __device__ bool cudaIsEmpty(){
        if(head->next == tail){
            return true;
        }else{
            return false;
        }
    }
    int getSize(){
        return listSize;
    }
    bool isEmpty(){
        if(head->next == tail){
            return true;
        }else{
            return false;
        }
    }
    void printList(){
        struct Node* node = head->next;
        while(node != tail){
            //printf("value %d\n", node->data);
            node = node->next;
        }
    }
};

// One thread per candidate neighbor x of vertex *i: mark and enqueue it.
// NOTE(review): no bounds check on x against the vertex count, and
// concurrent cudaPush calls race on the shared list.
__global__ void cudaComputeBFS(bool* visited, int** graphMatrix, List* bfsQueue, int* i){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if(graphMatrix[*i][x] != -1 && !visited[x]){
        visited[x] = true;
        //printf("inner visit node %d\n", x);
        bfsQueue->cudaPush(x);
    }
}

// Attempted GPU BFS. NOTE(review): this function is fundamentally broken and
// is (consistently) never called from main:
//  - cudaBfsQueue is an UNINITIALIZED pointer, yet cudaMalloc(&cudaBfsQueue->head,...)
//    dereferences it -- undefined behavior;
//  - cudaMalloc(&cudaGraphMatrix[k], ...) writes through a DEVICE pointer
//    from host code -- invalid;
//  - the memcpy loop increments count TWICE in one unsequenced expression
//    (UB) and never pops bfsQueue, so it cannot terminate;
//  - dimGrid/dimBlock names are swapped relative to their contents;
//  - all device buffers are re-allocated and freed on EVERY queue pop.
void cudaBFS(Graph g){
    const int vNum = g.verticeNum;
    bool visited[vNum];
    for(int i = 0; i < vNum; i++){
        visited[i] = false;
    }
    struct List bfsQueue(vNum);
    for(int i = 0; i < vNum; i++){
        //if not visited, we do processing..
        if(!visited[i]){
            //mark the node as visited
            visited[i] = true;
            printf("visit node %d\n", i);
            bfsQueue.push(i);
            while(!bfsQueue.isEmpty()){
                i = bfsQueue.pop();
                //cuda optimization...
                const int BLOCK_SIZE = 64;
                dim3 dimBlock((vNum + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
                dim3 dimGrid(BLOCK_SIZE, 1, 1);
                bool* cudaVisited;
                int** cudaGraphMatrix;
                List* cudaBfsQueue;
                int* cudaI;
                int* cudaSize;
                int* size;
                cudaMalloc(&cudaVisited, sizeof(bool) * vNum);
                cudaMalloc(&cudaBfsQueue->head, sizeof(Node) * vNum + 2);
                cudaMalloc(&cudaGraphMatrix, sizeof(int *) * vNum);
                cudaMalloc(&cudaI, sizeof(int*));
                cudaMalloc(&cudaSize, sizeof(int*));
                for(int k = 0; k < vNum; k++){
                    cudaMalloc(&cudaGraphMatrix[k], sizeof(int) * vNum);
                }
                //copy data to device
                cudaMemcpy(cudaVisited, visited, sizeof(bool) * vNum, cudaMemcpyHostToDevice);
                int count = 0;
                while(!bfsQueue.isEmpty()){
                    cudaMemcpy(&cudaBfsQueue->head + count++, &bfsQueue.head + count++, sizeof(Node), cudaMemcpyHostToDevice);
                }
                for(int k = 0; k < vNum; k++){
                    cudaMemcpy(cudaGraphMatrix, g.graphMatrix, sizeof(int) * vNum, cudaMemcpyHostToDevice);
                }
                cudaMemcpy(cudaI, &i, sizeof(int), cudaMemcpyHostToDevice);
                cudaComputeBFS<<<dimGrid, dimBlock>>>(cudaVisited, cudaGraphMatrix, cudaBfsQueue, cudaI);
                //update visited & queue...
                cudaMemcpy(visited, cudaVisited, sizeof(bool) * vNum, cudaMemcpyDeviceToHost);
                cudaMemcpy(&bfsQueue.head, &cudaBfsQueue->head, sizeof(Node) * vNum, cudaMemcpyDeviceToHost);
                cudaFree(cudaVisited);
                cudaFree (cudaGraphMatrix);
                cudaFree(cudaI);
                cudaFree(cudaBfsQueue);
            }
        }
    }
}

// Benchmark driver; all three BFS calls are commented out, so only the
// timing scaffolding runs.
int main(){
    struct timeval tpstart, tpend;
    long timeuse;
    Graph graph("./largeG.txt");

    gettimeofday( &tpstart, NULL );
    //graph.BFS();
    gettimeofday (&tpend, NULL);
    timeuse = 1000 * (tpend.tv_sec - tpstart.tv_sec) + (tpend.tv_usec - tpstart.tv_usec) / 1000;
    printf("CPU looped version is finished in time %ld ms\n", timeuse);

    gettimeofday( &tpstart, NULL );
    //graph.recursiveBFS();
    gettimeofday (&tpend, NULL);
    timeuse = 1000 * (tpend.tv_sec - tpstart.tv_sec) + (tpend.tv_usec - tpstart.tv_usec) / 1000;
    printf("CPU recursive version is finished in time %ld ms\n", timeuse);

    gettimeofday( &tpstart, NULL );
    //cudaBFS(graph);
    gettimeofday (&tpend, NULL);
    timeuse = 1000 * (tpend.tv_sec - tpstart.tv_sec) + (tpend.tv_usec - tpstart.tv_usec) / 1000;
    printf("GPU version is finished in time %ld ms\n", timeuse);
    return 0;
}
22,052
#include "includes.h" extern "C" { } #define TB 256 #define EPS 0.1 #undef MIN #define MIN(a, b) ((a) < (b) ? (a) : (b)) #undef MAX #define MAX(a, b) ((a) > (b) ? (a) : (b)) __global__ void hist_remap2_kernel( float *I, int nI, float *mI, float *histJ, float *cumJ, float *_minJ, float *_maxJ, int nbins, float *_sortI, int *_idxI, float *R, int c, int h, int w ) { int _id = blockIdx.x * blockDim.x + threadIdx.x; int size = h * w; if (_id < c * size) { // _id = dc * size + id int id = _id % size, dc = _id / size; float minJ = _minJ[dc]; float maxJ = _maxJ[dc]; float stepJ = (maxJ - minJ) / nbins; int idxI = _idxI[_id] - 1; if (mI[idxI] < EPS) return ; int offset = h * w - nI; int cdf = id - offset; int s = 0; int e = nbins - 1; int m = (s + e) / 2; int binIdx = -1; while (s <= e) { // special handling for range boundary float cdf_e = m == nbins - 1 ? cumJ[dc * nbins + m] + 0.5f : cumJ[dc * nbins + m]; float cdf_s = m == 0 ? -0.5f : cumJ[dc * nbins + m - 1]; if (cdf >= cdf_e) { s = m + 1; m = (s + e) / 2; } else if (cdf < cdf_s) { e = m - 1; m = (s + e) / 2; } else { binIdx = m; break; } } float hist = histJ[dc * nbins + binIdx]; float cdf_e = cumJ[dc * nbins + binIdx]; float cdf_s = cdf_e - hist; float ratio = MIN(MAX((cdf - cdf_s) / (hist + 1e-8), 0.0f), 1.0f); float activation = minJ + (static_cast<float>(binIdx) + ratio) * stepJ; R[dc * size + idxI] = activation; } return ; }
22,053
#include "includes.h" __global__ void SumSymbolsKernel( float *symbolOne, float *symbolTwo, float *result, int symbolSize ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < symbolSize) { result[threadId] = symbolOne[threadId] + symbolTwo[threadId]; } }
22,054
#include "includes.h" __global__ void ConditionCFLKernel2D2 (double *newDT, double *DT2D, double *DT1D, double *Vmoy, double *invRmed, int *CFL, int nsec, int nrad, double DeltaT) { int i = threadIdx.x + blockDim.x*blockIdx.x; int k; double dt; double newdt = 1e30; if (i>0 && i<nrad){ newDT[i] = newdt; for (k = 0; k < nsec; k++) if (DT2D[i*nsec + k] < newDT[i]) newDT[i] = DT2D[i*nsec + k]; // for each dt in nrad } if (i<nrad-1){ dt = 2.0*PI*CFLSECURITY/(double)nsec/fabs(Vmoy[i]*invRmed[i]-Vmoy[i+1]*invRmed[i+1]); DT1D[i] = dt; // array nrad size dt } }
22,055
#include <stdio.h> void __global__ kernel_add_one(int* a, int length) { int gid = threadIdx.x + blockDim.x*blockIdx.x; while(gid < length) { a[gid] += 1; gid += blockDim.x*gridDim.x; } }
22,056
/*
   fragment gpu RAM by allocating a bunch of blocks and then releasing some
   in between, creating holes
   then try to allocate more than the size of the largest hole, but less
   than total free memory
   it appears that CUDA succeeds
   conclusion: cudaMalloc is not allocating contiguous memory
*/
#include <stdio.h>
#include <unistd.h>
#include <cuda.h>

const size_t Mb = 1<<20; // Assuming a 1Mb page size here

#define DSIZE0 410000000ULL // ~400MB
#define DSIZE1 3144000000ULL // ~3000MB
#define DSIZE2 524000000ULL // ~500MB
#define DSIZE3 630000000ULL // ~600MB  (NOTE(review): unused below)

// Probe the largest allocatable block by trial and error: start at the
// total device memory and shrink by 1Mb until cudaMalloc succeeds.
void can_allocate() {
    size_t total;
    size_t avail;
    cudaError_t cuda_status = cudaMemGetInfo(&avail, &total);
    if ( cudaSuccess != cuda_status ) {
        printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
        exit(EXIT_FAILURE);
    }
    printf("free: %.f, total %.f\n", (double)avail/Mb, (double)total/Mb);

    int *buf_d = 0;
    size_t nwords = total / sizeof(int);
    size_t words_per_Mb = Mb / sizeof(int);

    /* the only way to measure how much memory is allocatable is by trial
       and error, cudaMemGetInfo's available memory information is not
       reliable */
    while (cudaMalloc((void**)&buf_d, nwords * sizeof(int)) == cudaErrorMemoryAllocation) {
        cudaFree(buf_d);
        nwords -= words_per_Mb;
        if (nwords < words_per_Mb) {
            // signal no free memory
            break;
        }
    }
    cudaFree(buf_d);

    /* clear last error (the failed cudaMallocs above leave a sticky error) */
    printf("err2: %d\n", (int)cudaGetLastError());
    printf("can allocate: %.fMB\n", (double)nwords/words_per_Mb);
}

// Deliberately leaks allocations and ignores failures: the point is to carve
// holes into device memory and observe what cudaMalloc can still satisfy.
int main() {
    int *d0, *d1, *d2, *d3, *d4;
    //cudaSetDevice(0);

    /* starting with 8GB free */
    /* legend: [allocated]{free} */

    // init - prealloc 500MB, including ~100MB CUDA ctx
    // [0.5]{7.5} - free total=7.5
    cudaMalloc(&d0, DSIZE0);
    printf("err1: %d\n", (int)cudaGetLastError());

    // [0.5][0.5]{7.0} - free total=7.0
    cudaMalloc(&d1, DSIZE2);
    printf("err1: %d\n", (int)cudaGetLastError());

    // [0.5][0.5][3]{4.0} - free total=4.0
    cudaMalloc(&d2, DSIZE1);
    printf("err2: %d\n", (int)cudaGetLastError());

    // [0.5][0.5][3][0.5]{3.5} - free total=3.5
    cudaMalloc(&d3, DSIZE2);
    printf("err3: %d\n", (int)cudaGetLastError());

    // [0.5][0.5][3][0.5][3]{0.5} - free total=0.5
    cudaMalloc(&d4, DSIZE1);
    printf("err2: %d\n", (int)cudaGetLastError());

    // [0.5]{0.5}[3][0.5][3]{0.5} - free total=1.0
    cudaFree(d1);
    printf("err4: %d\n", (int)cudaGetLastError());

    // [0.5]{0.5}[3]{0.5}[3]{0.5} - free total=1.5
    cudaFree(d3);
    printf("err4: %d\n", (int)cudaGetLastError());

    // here we should have 1.5GB free in total, with 3 fragments of 0.5GB
    // this should say 0.5GB, but it says 1.6GB - so it allocates over fragments
    can_allocate();

    // another way to check is we shouldn't be able to allocate say 1GB of contiguous memory
    cudaMalloc(&d1, 2*DSIZE2);
    printf("err2: %d\n", (int)cudaGetLastError());

    // sanity check 2GB at 1.5G free should fail
    // this fails, good
    cudaMalloc(&d1, 4*DSIZE2);
    printf("err2: %d\n", (int)cudaGetLastError());

    sleep(1000); /* keep consuming RAM */
    return 0;
}
22,057
#include "includes.h" __global__ void second_calculation( char* dev_a, char* dev_b, char* dev_c, int k, int num_matrices, int matrix_size ) { // Each thread handles a matrix int j = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id if (j >= matrix_size) return; //If first value in the row of the matrix, do addition if (dev_a[k*matrix_size*matrix_size+j*matrix_size] < threshold){ for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; dev_c[index] = dev_a[index] + dev_b[index]; } //Do subtraction } else { for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; dev_c[index] = dev_a[index] - dev_b[index]; } } }
22,058
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

#define MASK_WIDTH 5
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
#define TILE_WIDTH 32
// NOTE(review): unparenthesized and unused; as written it expands to
// TILE_WIDTH + 2, not (TILE_WIDTH + MASK_WIDTH - 1).
#define SHARED_WIDTH TILE_WIDTH+(MASK_WIDTH-1)/2

typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

typedef struct {
    int x, y;          // image width (x) and height (y)
    PPMPixel *data;    // row-major pixel array, x*y entries
} PPMImage;

// Wall-clock time in seconds (gettimeofday based).
double rtclock() {
    struct timezone Tzp;
    struct timeval Tp;
    int stat;
    stat = gettimeofday (&Tp, &Tzp);
    if (stat != 0) printf("Error return from gettimeofday: %d",stat);
    return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}

// Read a binary PPM (P6) file; exits on any format error.
static PPMImage *readPPM(const char *filename) {
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    img = (PPMImage *) malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    // Skip '#' comment lines between the magic number and the dimensions.
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n') ;
        c = getc(fp);
    }
    ungetc(c, fp);
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    while (fgetc(fp) != '\n') ;
    img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
    // NOTE(review): this re-checks img instead of img->data, so a failed
    // pixel-buffer allocation is not caught.
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}

// Write img to stdout as P6. NOTE(review): fclose(stdout) means nothing can
// be printed afterwards.
void writePPM(PPMImage *img) {
    fprintf(stdout, "P6\n");
    fprintf(stdout, "# %s\n", COMMENT);
    fprintf(stdout, "%d %d\n", img->x, img->y);
    fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
    fwrite(img->data, 3 * img->x, img->y, stdout);
    fclose(stdout);
}

// CPU reference: 5x5 box blur per channel; out-of-image neighbors contribute
// zero but the divisor stays MASK_WIDTH^2 (darkens the borders).
void Smoothing_CPU_Serial(PPMImage *image, PPMImage *image_copy) {
    int i, j, y, x;
    int total_red, total_blue, total_green;
    for (i = 0; i < image->y; i++) {
        for (j = 0; j < image->x; j++) {
            total_red = total_blue = total_green = 0;
            for (y = i - ((MASK_WIDTH-1)/2); y <= (i + ((MASK_WIDTH-1)/2)); y++) {
                for (x = j - ((MASK_WIDTH-1)/2); x <= (j + ((MASK_WIDTH-1)/2)); x++) {
                    if (x >= 0 && y >= 0 && y < image->y && x < image->x) {
                        total_red += image_copy->data[(y * image->x) + x].red;
                        total_blue += image_copy->data[(y * image->x) + x].blue;
                        total_green += image_copy->data[(y * image->x) + x].green;
                    } //if
                } //for z
            } //for y
            image->data[(i * image->x) + j].red = total_red / (MASK_WIDTH*MASK_WIDTH);
            image->data[(i * image->x) + j].blue = total_blue / (MASK_WIDTH*MASK_WIDTH);
            image->data[(i * image->x) + j].green = total_green / (MASK_WIDTH*MASK_WIDTH);
        }
    }
}

// Unpack a flat interleaved RGB byte vector back into a PPMImage.
void vector_Image(PPMImage *image_output, unsigned char* image_output_vector, int dim ) {
    for ( int i = 0; i < dim; i++ ) {
        image_output->data[i].red = image_output_vector[i*3];
        image_output->data[i].green = image_output_vector[i*3+1];
        image_output->data[i].blue = image_output_vector[i*3+2];
    }
}

// Flatten a PPMImage into an interleaved RGB byte vector (caller frees).
unsigned char* Image_vector(PPMImage* image_input, int dim ) {
    unsigned char* image_input_vector = ( unsigned char* ) malloc ( sizeof ( unsigned char ) * dim*3 );
    //Fills the vector with the already normalized data.
    for ( int i = 0; i < dim; i++) {
        image_input_vector[i*3] = image_input->data[i].red;
        image_input_vector[i*3+1] = image_input->data[i].green;
        image_input_vector[i*3+2] = image_input->data[i].blue;
    }
    return image_input_vector;
}

// GPU 5x5 box blur attempt using a per-block shared tile.
// NOTE(review): several serious problems in this kernel as written:
//  - row is compared against width and col against height (swapped) in every
//    guard; only coincidentally harmless for square images;
//  - the neighborhood loops index the 32x32 shared tiles with GLOBAL
//    coordinates (y * TILE_WIDTH + x), which runs far out of the tile for
//    every block except the first -- out-of-bounds shared-memory access;
//  - no halo is loaded, so even correctly-indexed border neighbors would be
//    missing;
//  - the second __syncthreads() sits inside a divergent if -- UB if any
//    thread of the block fails the guard;
//  - total_blue accumulates the green tile and total_green the blue tile,
//    then they are stored to the green and blue channels respectively, so
//    the channel data happens to line up despite the misleading names.
__global__ void smothing_filter_Paralelo(unsigned char* image_input, unsigned char* image_output, int width, int height, int dim ) {
    __shared__ int private_red[TILE_WIDTH*TILE_WIDTH];
    __shared__ int private_green[TILE_WIDTH*TILE_WIDTH];
    __shared__ int private_blue[TILE_WIDTH*TILE_WIDTH];

    int y, x;
    int total_red = 0, total_green = 0, total_blue = 0;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx= threadIdx.x;
    int ty = threadIdx.y;

    //Compute the current element index
    int row = by * blockDim.y + ty;
    int col = bx * blockDim.x + tx;

    // Calculate the index of the image
    int index = row * width + col;

    // Stage this thread's pixel into the shared tile (zero if out of range).
    if((row < width && col < height) && (index < dim)) {
        private_red[ty * TILE_WIDTH + tx] = image_input[index * 3];
        private_green[ty * TILE_WIDTH + tx] = image_input[index * 3 + 1];
        private_blue[ty * TILE_WIDTH + tx] = image_input[index * 3 + 2];
    }
    else {
        private_red[ty * TILE_WIDTH + tx] = 0;
        private_green[ty * TILE_WIDTH + tx] = 0;
        private_blue[ty * TILE_WIDTH + tx] = 0;
    }
    __syncthreads();

    if(row < width && col < height) {
        for (y = row - ((MASK_WIDTH-1)/2); y <= (row + ((MASK_WIDTH-1)/2)); y++) {
            for (x = col - ((MASK_WIDTH-1)/2); x <= (col + ((MASK_WIDTH-1)/2)); x++) {
                if (x >= 0 && y >= 0 && y < width && x < height) {
                    total_red += private_red[y * TILE_WIDTH + x];
                    total_blue += private_green[y * TILE_WIDTH + x];
                    total_green += private_blue[y * TILE_WIDTH + x];
                } //if
            } //for z
        } //for y
        __syncthreads();

        image_output[index * 3] = total_red / (MASK_WIDTH*MASK_WIDTH);
        image_output[index * 3 + 1] = total_blue / (MASK_WIDTH*MASK_WIDTH);
        image_output[index * 3 + 2] = total_green / (MASK_WIDTH*MASK_WIDTH);
    }
    /* (removed: a large commented-out earlier draft of this kernel that
       tiled with 2-D private arrays; it was dead code.) */
}

int main(int argc, char *argv[]) {

    // NOTE(review): this only warns; argv[1] is still used below, so running
    // with no argument dereferences a null pointer.
    if( argc != 2 ) {
        printf("Too many or no one arguments supplied.\n");
    }

    double t_start, t_end;
    int i;
    char *filename = argv[1]; //Recebendo o arquivo!;

    PPMImage *image = readPPM(filename);
    PPMImage *image_output = readPPM(filename);

    int x = image->x;
    int y = image->y;
    int dim = x*y;

    unsigned char* image_vect = Image_vector(image,dim);

    // Iniatialize variables
    unsigned char* image_par;
    unsigned char* image_output_par;

    // Alloc CPU memory
    unsigned char* image_output_vect = Image_vector(image,dim);

    // Alloc space for device copies of a, b, c //////////////////
    cudaMalloc((void**)&image_par, sizeof(unsigned char)*dim*3);
    cudaMalloc((void**)&image_output_par, sizeof(unsigned char)*dim*3);

    // Copy inputs to device ////////////////////////////////////////////////////////////////
    cudaMemcpy(image_par, image_vect, sizeof(unsigned char)*dim*3 ,cudaMemcpyHostToDevice );
    cudaMemcpy(image_output_par, image_output_vect, sizeof(unsigned char)*dim*3 ,cudaMemcpyHostToDevice );

    // Initialize dimGrid and dimBlocks
    // create a grid with (32 / columns) number of columns and (32 / lines) number of rows
    // the ceiling function makes sure there are enough to cover all elements
    dim3 dimGrid(ceil((float)y / TILE_WIDTH), ceil((float)x / TILE_WIDTH), 1);
    // create a block with 32 columns and 32 rows
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);

    // Launch smothing_filter_Paralelo() kernel on GPU with a grid and block as input ////
    smothing_filter_Paralelo<<<dimGrid, dimBlock>>>(image_par, image_output_par,x,y,dim);

    // Copy result to local array //////////////////////////////////////////////
    cudaMemcpy(image_output_vect, image_output_par, sizeof(unsigned char)*dim*3, cudaMemcpyDeviceToHost);

    vector_Image(image_output,image_output_vect,dim);

    writePPM(image_output);

    cudaFree(image_par);
    cudaFree(image_output_par);

    free(image);
    free(image_output);
}
22,059
/*
 *    purpose: CUDA managed unified memory for >= pascal architectures;
 *             this version just uses the variant of globally managed
 *             memory hopefully set aside on the device, but for the rest
 *             everything remains pretty similar to previous attempts;
 *     result: from profiling via 'nvprof ./a.out' we now see pretty
 *             good results too, practically identical to the prefetching
 *             run which showed best performance so far; no page faults
 *             since the globally __managed__ resides on the GPU anyway
 * compilation: nvcc ./unified_memory_example_5.cu
 *      usage: ./a.out
 */
#include <stdio.h>

#define ARRAYDIM 268435456

/*
 * managed variable declaration for GPU memory (total of 3GB)
 */
__device__ __managed__ float x[ARRAYDIM], y[ARRAYDIM], z[ARRAYDIM];

/*
 * GPU kernel doing the initialization: x[i] = i, y[i] = i + 1.
 * No bounds guard -- the host launch covers exactly ARRAYDIM threads
 * (ARRAYDIM is an exact multiple of the 256-thread block size).
 */
__global__ void KrnlDmmyInit()
{
    int i;

    i = (blockIdx.x * blockDim.x) + threadIdx.x;
    x[i] = (float) i;
    y[i] = (float) (i + 1);

    return;
}

/*
 * GPU kernel doing the calculation, ie adding together two arrays
 * (z = x + y, element-wise; same exact-coverage assumption as above)
 */
__global__ void KrnlDmmyCalc()
{
    int i;

    i = (blockIdx.x * blockDim.x) + threadIdx.x;
    z[i] = x[i] + y[i];

    return;
}

/*
 * host main
 */
int main()
{
    // cudaRtrn is declared but never assigned -- kernel launches return
    // void, so there is nothing to store; kept for source fidelity.
    int i, cudaRtrn;
    dim3 thrds_per_block, blcks_per_grid;

    /*
     * so all we want to do is calling simple kernels that
     * (i) initialize array elements a[] and b[] with thread-specific
     *     values and
     * (ii) add together these values and store back the results into
     *     array c[] where the latter task shall be repeated within a loop
     *     over 100 iterations
     */
    thrds_per_block.x = 256;
    blcks_per_grid.x = ARRAYDIM / thrds_per_block.x;

    KrnlDmmyInit<<<blcks_per_grid, thrds_per_block>>>();
    cudaDeviceSynchronize();
    //printf("initialization completed\n");
    //printf("x[10] %f y[10] %f z[10] %f\n", x[10], y[10], z[10]);
    //printf("x[100] %f y[100] %f z[100] %f\n", x[100], y[100], z[100]);
    //printf("x[1000] %f y[1000] %f z[1000] %f\n", x[1000], y[1000], z[1000]);

    // Repeat the add 100 times; each launch is followed by a full device
    // sync so nvprof attributes time per iteration.
    for (i=0; i<100; i++) {
        KrnlDmmyCalc<<<blcks_per_grid, thrds_per_block>>>();
        cudaDeviceSynchronize();
        //printf("iteration %d\n", i);
    }

    return(0);
}
22,060
#include "includes.h" __global__ void inefficient_prefixSum(float* in, int in_length, float* out ){ //shared memory declaration extern __shared__ float DSM[]; //compute index int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < in_length){ //load on shared memory DSM[threadIdx.x] = in[idx]; //compute prefix_sum making sequence of sums for(int stride = 1; stride <= threadIdx.x; stride *= 2){ __syncthreads(); DSM[threadIdx.x] = DSM[threadIdx.x] + DSM[threadIdx.x - stride]; } out[idx] = DSM[threadIdx.x]; } }
22,061
/* compile with: nvcc -O3 hw1.cu -o hw1 */

#include <stdio.h>
#include <sys/time.h>
#include <assert.h>

/* Sentinel used by arr_min() when searching for the smallest positive CDF
 * value.  Bug fix: the original `(1<<16 - 1)` parsed as `1 << 15` == 32768
 * because `-` binds tighter than `<<`; a CDF entry can be as large as
 * IMG_WIDTH * IMG_HEIGHT == 65536, so the sentinel could be smaller than the
 * true minimum and arr_min() would return the sentinel itself.  INT_MAX is
 * always larger than any CDF value. */
#define INF 0x7fffffff

////////////////////////////// DO NOT CHANGE //////////////////////////////////
#define IMG_HEIGHT 256
#define IMG_WIDTH 256
#define N_IMAGES 10000

typedef unsigned char uchar;

#define CUDA_CHECK(f) do {                                                                  \
    cudaError_t e = f;                                                                      \
    if (e != cudaSuccess) {                                                                 \
        printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e));    \
        exit(1);                                                                            \
    }                                                                                       \
} while (0)

#define SQR(a) ((a) * (a))

/* CPU reference: histogram-equalize a single IMG_WIDTH x IMG_HEIGHT image. */
void process_image(uchar *img_in, uchar *img_out) {
    int histogram[256] = { 0 };
    for (int i = 0; i < IMG_WIDTH * IMG_HEIGHT; i++) {
        histogram[img_in[i]]++;
    }

    int cdf[256] = { 0 };
    int hist_sum = 0;
    for (int i = 0; i < 256; i++) {
        hist_sum += histogram[i];
        cdf[i] = hist_sum;
    }

    int cdf_min = 0;
    for (int i = 0; i < 256; i++) {
        if (cdf[i] != 0) {
            cdf_min = cdf[i];
            break;
        }
    }

    uchar map[256] = { 0 };
    for (int i = 0; i < 256; i++) {
        int map_value = (float)(cdf[i] - cdf_min) / (IMG_WIDTH * IMG_HEIGHT - cdf_min) * 255;
        map[i] = (uchar)map_value;
    }

    for (int i = 0; i < IMG_WIDTH * IMG_HEIGHT; i++) {
        img_out[i] = map[img_in[i]];
    }
}

double static inline get_time_msec(void) {
    struct timeval t;
    gettimeofday(&t, NULL);
    return t.tv_sec * 1e+3 + t.tv_usec * 1e-3;
}

long long int distance_sqr_between_image_arrays(uchar *img_arr1, uchar *img_arr2) {
    long long int distance_sqr = 0;
    for (int i = 0; i < N_IMAGES * IMG_WIDTH * IMG_HEIGHT; i++) {
        distance_sqr += SQR(img_arr1[i] - img_arr2[i]);
    }
    return distance_sqr;
}
///////////////////////////////////////////////////////////////////////////////

/* Build the 256-bin histogram of one image into res (shared memory).
 * Assumes blockDim.x >= 256 and blockDim.x divides IMG_WIDTH * IMG_HEIGHT. */
__device__ void compute_histogram(uchar *img, int *res, int res_size) {
    int tid = threadIdx.x;
    int work_per_thread = (IMG_WIDTH * IMG_HEIGHT) / blockDim.x;

    /* initialize the histogram */
    if (tid < 256) {
        res[tid] = 0;
    }
    __syncthreads();

    /* compute the histogram; atomics because several threads may hit the
       same gray level */
    int index;
    for (int i = 0; i < work_per_thread; i++) {
        index = blockDim.x * i + tid;
        atomicAdd(&res[img[index]], 1);
    }
    __syncthreads();
}

/* In-place inclusive scan (Hillis-Steele) over arr[0..arr_size).
 * The read is separated from the write by a barrier to avoid races. */
__device__ void prefix_sum(int arr[], int arr_size) {
    int tid = threadIdx.x;
    int increment;
    for (int stride = 1; stride < arr_size; stride *= 2) {
        if (tid < arr_size && tid >= stride) {
            increment = arr[tid - stride];
        }
        __syncthreads();
        if (tid < arr_size && tid >= stride) {
            arr[tid] += increment;
        }
        __syncthreads();
    }
}

/* Minimum over the *positive* entries of arr[], written to *res
 * (zero entries are skipped). */
__device__ void arr_min(int arr[], int arr_size, int *res) {
    int tid = threadIdx.x;

    /* initialize res to 'inf' */
    if (tid == 0) {
        *res = INF;
    }
    __syncthreads();

    /* was `arr[tid] && atomicMin(...)` — same short-circuit, spelled out */
    if (tid < arr_size && arr[tid] != 0)
        atomicMin(res, arr[tid]);
    __syncthreads();
}

/* Turn the CDF into the gray-level remapping table. */
__device__ void compute_map(int *cdf, int cdf_positive_min, uchar *map) {
    int tid = threadIdx.x;

    if (tid < 256) {
        map[tid] = (float)(cdf[tid] - cdf_positive_min) / (IMG_HEIGHT * IMG_WIDTH - cdf_positive_min) * 255;
    }
    __syncthreads();
}

/* Apply the remapping table to every pixel of the image. */
__device__ void remap_img(uchar *in, uchar *out, uchar *map) {
    int tid = threadIdx.x;
    int work_per_thread = (IMG_WIDTH * IMG_HEIGHT) / blockDim.x;

    /* remap the image */
    int index;
    for (int i = 0; i < work_per_thread; i++) {
        index = blockDim.x * i + tid;
        out[index] = map[in[index]];
    }
    __syncthreads();
}

/* Full histogram-equalization pipeline for one image; `cdf` is reused:
 * it holds the histogram, then (after the scan) the CDF. */
__device__ void process_image_kernel_aux(uchar *in, uchar *out) {
    __shared__ int cdf[256];
    __shared__ uchar m[256];
    __shared__ int cdf_positive_min;

    compute_histogram(in, cdf, 256);
    prefix_sum(cdf, 256);
    arr_min(cdf, 256, &cdf_positive_min);
    compute_map(cdf, cdf_positive_min, m);
    remap_img(in, out, m);
}

/* process a single image by a single threadBlock */
__global__ void process_image_kernel(uchar *in, uchar *out) {
    process_image_kernel_aux(in, out);
}

/* process all images concurrently: one block per image */
__global__ void process_all_images_kernel(uchar *in, uchar *out) {
    int bid = blockIdx.x;
    int offset = bid * IMG_WIDTH * IMG_HEIGHT;
    process_image_kernel_aux(in + offset, out + offset);
}

int main() {
    ////////////////////////////// DO NOT CHANGE //////////////////////////////////
    uchar *images_in;
    uchar *images_out_cpu; //output of CPU computation. In CPU memory.
    uchar *images_out_gpu_serial; //output of GPU task serial computation. In CPU memory.
    uchar *images_out_gpu_bulk; //output of GPU bulk computation. In CPU memory.
    CUDA_CHECK( cudaHostAlloc(&images_in, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
    CUDA_CHECK( cudaHostAlloc(&images_out_cpu, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
    CUDA_CHECK( cudaHostAlloc(&images_out_gpu_serial, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
    CUDA_CHECK( cudaHostAlloc(&images_out_gpu_bulk, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );

    /* instead of loading real images, we'll load the arrays with random data */
    srand(0);
    for (long long int i = 0; i < N_IMAGES * IMG_WIDTH * IMG_HEIGHT; i++) {
        images_in[i] = rand() % 256;
    }

    double t_start, t_finish;

    // CPU computation. For reference. Do not change
    printf("\n=== CPU ===\n");
    t_start = get_time_msec();
    for (int i = 0; i < N_IMAGES; i++) {
        uchar *img_in = &images_in[i * IMG_WIDTH * IMG_HEIGHT];
        uchar *img_out = &images_out_cpu[i * IMG_WIDTH * IMG_HEIGHT];
        process_image(img_in, img_out);
    }
    t_finish = get_time_msec();
    printf("total time %f [msec]\n", t_finish - t_start);

    long long int distance_sqr;
    ///////////////////////////////////////////////////////////////////////////////

    // GPU task serial computation
    printf("\n=== GPU Task Serial ===\n"); //Do not change

    uchar *device_img_in = NULL;
    uchar *device_img_out = NULL;
    /* allocate memory on the GPU global memory for one image at a time */
    CUDA_CHECK(cudaMalloc(&device_img_in, IMG_WIDTH * IMG_HEIGHT));
    CUDA_CHECK(cudaMalloc(&device_img_out, IMG_WIDTH * IMG_HEIGHT));

    t_start = get_time_msec(); //Do not change

    for (int i = 0; i < N_IMAGES; i++) {
        uchar *img_in = &images_in[i * IMG_WIDTH * IMG_HEIGHT];
        uchar *img_out = &images_out_gpu_serial[i * IMG_WIDTH * IMG_HEIGHT];

        /* copy the relevant image from images_in to the GPU memory allocated */
        CUDA_CHECK(cudaMemcpy(device_img_in, img_in, IMG_WIDTH * IMG_HEIGHT, cudaMemcpyHostToDevice));

        int blocks = 1;
        int threads_in_block = 1024;

        /* invoke the GPU kernel */
        process_image_kernel<<<blocks, threads_in_block>>>(device_img_in, device_img_out);

        /* copy output from GPU memory to relevant location in images_out_gpu_serial
           (a blocking memcpy also synchronizes with the kernel) */
        CUDA_CHECK(cudaMemcpy(img_out, device_img_out, IMG_WIDTH * IMG_HEIGHT, cudaMemcpyDeviceToHost));
    }
    t_finish = get_time_msec(); //Do not change

    /* free the GPU global memory allocated */
    CUDA_CHECK(cudaFree(device_img_in));
    CUDA_CHECK(cudaFree(device_img_out));

    distance_sqr = distance_sqr_between_image_arrays(images_out_cpu, images_out_gpu_serial); // Do not change
    printf("total time %f [msec] distance from baseline %lld (should be zero)\n", t_finish - t_start, distance_sqr); //Do not change

    // GPU bulk
    printf("\n=== GPU Bulk ===\n"); //Do not change

    uchar *device_all_imgs_in = NULL;
    uchar *device_all_imgs_out = NULL;
    /* allocate memory on the GPU global memory for all images at once */
    CUDA_CHECK(cudaMalloc(&device_all_imgs_in, N_IMAGES * IMG_WIDTH * IMG_HEIGHT));
    CUDA_CHECK(cudaMalloc(&device_all_imgs_out, N_IMAGES * IMG_WIDTH * IMG_HEIGHT));

    t_start = get_time_msec(); //Do not change

    /* copy all images from images_in to the GPU memory allocated */
    CUDA_CHECK(cudaMemcpy(device_all_imgs_in, images_in, N_IMAGES * IMG_WIDTH * IMG_HEIGHT, cudaMemcpyHostToDevice));

    int blocks = N_IMAGES;
    int threads_in_block = 1024;

    /* invoke the GPU kernel: one block per image */
    process_all_images_kernel<<<blocks, threads_in_block>>> (device_all_imgs_in, device_all_imgs_out);

    /* copy output from GPU memory to images_out_gpu_bulk */
    CUDA_CHECK(cudaMemcpy(images_out_gpu_bulk, device_all_imgs_out, N_IMAGES * IMG_WIDTH * IMG_HEIGHT, cudaMemcpyDeviceToHost));

    t_finish = get_time_msec(); //Do not change

    /* free the GPU global memory allocated */
    CUDA_CHECK(cudaFree(device_all_imgs_in));
    CUDA_CHECK(cudaFree(device_all_imgs_out));

    distance_sqr = distance_sqr_between_image_arrays(images_out_cpu, images_out_gpu_bulk); // Do not change
    printf("total time %f [msec] distance from baseline %lld (should be zero)\n", t_finish - t_start, distance_sqr); //Do not change

    /* free allocated memory */
    CUDA_CHECK(cudaFreeHost(images_in));
    CUDA_CHECK(cudaFreeHost(images_out_cpu));
    CUDA_CHECK(cudaFreeHost(images_out_gpu_serial));
    CUDA_CHECK(cudaFreeHost(images_out_gpu_bulk));

    return 0;
}
22,062
__global__ void applyFilter(const unsigned char *input, unsigned char *output, const unsigned int width, const unsigned int height, const float *kernel, const unsigned int kernelWidth) { const unsigned int col = threadIdx.x + blockIdx.x * blockDim.x; const unsigned int row = threadIdx.y + blockIdx.y * blockDim.y; if(row < height && col < width) { const int half = kernelWidth / 2; float blur = 0.0; for(int i = -half; i <= half; i++) { for(int j = -half; j <= half; j++) { const unsigned int y = max(0, min(height - 1, row + i)); const unsigned int x = max(0, min(width - 1, col + j)); const float w = kernel[(j + half) + (i + half) * kernelWidth]; blur += w * input[x + y * width]; } } output[col + row * width] = static_cast<unsigned char>(blur); } }
22,063
#include "ising.cuh"
#include <cmath>

// pre-calculated exp(dE / (kB * T)) acceptance factors, indexed as
// [dH + 2*dJ] where dJ in [0,4] is the number of neighbors aligned with the
// center spin and dH is 1 for an "up" center, 0 for "down"
__constant__ float exp_dE_beta[10];

// Precompute the 10 possible Metropolis acceptance factors on the host and
// copy them to GPU constant memory. beta = 1/(kB*T), J = coupling,
// H = external field.
void set_expdE(const float beta, const float J, const float H, const bool verbose) noexcept
{
    // up == true, down == false;
    // cache exp(dE) in constant memory
    const float exp_dE[10] = {
        // case {neighbors}, center
        std::exp(beta * ( 4*J + 2*H)), // {up, up, up, up}, down
        std::exp(beta * ( 4*J - 2*H)), // {dn, dn, dn, dn}, up
        std::exp(beta * ( 2*J + 2*H)), // {up, up, up, dn}, down
        std::exp(beta * ( 2*J - 2*H)), // {dn, dn, dn, up}, up
        std::exp(beta * ( 0*J + 2*H)), // {up, up, dn, dn}, down
        std::exp(beta * ( 0*J - 2*H)), // {dn, dn, up, up}, up
        std::exp(beta * (-2*J + 2*H)), // {up, dn, dn, dn}, down
        std::exp(beta * (-2*J - 2*H)), // {dn, up, up, up}, up
        std::exp(beta * (-4*J + 2*H)), // {dn, dn, dn, dn}, down
        std::exp(beta * (-4*J - 2*H))  // {up, up, up, up}, up
    };
    const cudaError_t err_dE = cudaMemcpyToSymbol(exp_dE_beta, exp_dE, sizeof(float) * 10);
    assert(err_dE == 0);

    if(verbose)
    {
        std::cerr << "Info: precalculated exp(dE) are copied to constant memory"
                  << std::endl;
    }
    return;
}

// One checkerboard half-step of the Metropolis update.
// `turn` selects which parity of (x+y) is updated so that no site reads a
// neighbor that is being written in the same pass. Boundaries wrap around
// (periodic/toroidal lattice). `random` supplies one uniform float per site.
__global__ void update_field_kernel(
        const thrust::device_ptr<bool>        spins,
        const thrust::device_ptr<const float> random,
        const std::size_t x_size, const std::size_t y_size, bool turn)
{
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;

    // checkerboard pattern: skip sites of the other parity this pass
    if ( turn && ((x+y)%2 == 1)) {return;}
    else if(!turn && ((x+y)%2 == 0)) {return;}

    // xdim is the padded row stride implied by the launch configuration
    const std::size_t xdim   = blockDim.x * gridDim.x;
    const std::size_t offset = x + y * xdim;
    if(offset >= x_size * y_size) { return; }

    // neighbor offsets with periodic wraparound at each edge
    const std::size_t n_offset = (y+1 <  y_size) ? x + (y+1) * xdim : x;
    const std::size_t e_offset = (x+1 <  x_size) ? (x+1) + y * xdim : y * xdim;
    const std::size_t s_offset = (y-1 >= 0)      ? x + (y-1) * xdim : x + (y_size-1) * xdim;
    const std::size_t w_offset = (x-1 >= 0)      ? (x-1) + y * xdim : x_size - 1 + y * xdim;

    const bool c = spins[  offset]; // center
    const bool n = spins[n_offset]; // north
    const bool e = spins[e_offset]; // east
    const bool s = spins[s_offset]; // south
    const bool w = spins[w_offset]; // west

    // dJ = number of neighbors aligned with the center spin
    std::size_t dJ = 0;
    if(c == n) {++dJ;}
    if(c == e) {++dJ;}
    if(c == s) {++dJ;}
    if(c == w) {++dJ;}
    const std::size_t dH = c ? 1 : 0;

    // Metropolis acceptance: flip if the precomputed factor beats the draw.
    // NOTE(review): both half-steps below are fed the same `random` buffer —
    // confirm the caller refreshes it between sweeps.
    if(exp_dE_beta[dH + dJ * 2] > random[offset])
    {
        spins[offset] = (!c);
    }
    return;
}

// Full lattice sweep: update the even-parity sites, then the odd-parity
// sites, each as an independent kernel launch (implicit ordering on the
// default stream).
void update_field(const dim3 blocks, const dim3 threads,
        const thrust::device_ptr<bool>        spins,
        const thrust::device_ptr<const float> random,
        const std::size_t x_size, const std::size_t y_size)
{
    update_field_kernel<<<blocks, threads>>>(
            spins, random, x_size, y_size, true);
    update_field_kernel<<<blocks, threads>>>(
            spins, random, x_size, y_size, false);
}

// Randomly initialize every spin: up iff its random draw exceeds threshold.
__global__ void initialize_field_kernel(
        const thrust::device_ptr<bool>        spins,
        const thrust::device_ptr<const float> random,
        const std::size_t x_size, const std::size_t y_size,
        const float threshold)
{
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;

    const std::size_t xdim   = blockDim.x * gridDim.x;
    const std::size_t offset = x + y * xdim;
    if(offset >= x_size * y_size) { return; }

    spins.get()[offset] = random.get()[offset] > threshold;
    return;
}

// Host-side wrapper for the initialization kernel.
void initialize_field(const dim3 blocks, const dim3 threads,
        const thrust::device_ptr<bool>        spins,
        const thrust::device_ptr<const float> random,
        const std::size_t x_size, const std::size_t y_size,
        const float threshold)
{
    initialize_field_kernel<<<blocks, threads>>>(
            spins, random, x_size, y_size, threshold);
    return;
}
22,064
#include <iostream> #include <stdio.h> #define N 64 #define TPB 32 float scale(int i, int n) { return ((float) i)/((float) (n-1)); } __device__ float distance(float x1, float x2) { float x = (x1-x2)*(x1-x2); return sqrt(x); } __global__ void distanceKernel(float *d_out, float *d_in, float ref) { const int i = blockIdx.x*blockDim.x + threadIdx.x; const float x = d_in[i]; d_out[i] = distance(x,ref); printf("out [ %d ] = %f \n", i, d_out[i]); } int main() { float *in = new float; float *out = new float; const float ref = 0.5; cudaMallocManaged(&in, N*sizeof(float)); cudaMallocManaged(&out, N*sizeof(float)); for (int i=0; i<N; i++) { in[i] = scale(i,N); } distanceKernel<<<N/TPB,TPB>>>(out, in, ref); cudaDeviceSynchronize(); cudaFree(in); cudaFree(out); }
22,065
#include <curand_kernel.h>
#include <curand.h>
#include <chrono>
#include <iostream>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <stdio.h>

// Seed one curand state per GPU thread (thread index as the subsequence).
__global__ void setup_gpu_rng(long long n, curandState *rng_states, long long seed)
{
    long long i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        curand_init(seed, i, 0, &rng_states[i]);
}

// Each thread draws `samples` random points in the unit square and writes its
// own pi estimate (4 * inside/total) to pi_estimates[i].
__global__ void gpu_estimate_pi(long long n, curandState *rng_states, long long samples, float *pi_estimates)
{
    long long i = blockIdx.x * blockDim.x + threadIdx.x, count = 0;
    float x, y, z;
    if (i < n) {
        for (long long j = 0; j < samples; j++) {
            x = curand_uniform(&rng_states[i]);
            y = curand_uniform(&rng_states[i]);
            z = sqrt((x*x) + (y*y));
            // point lies inside the unit circle
            if (z <= 1.0)
                count++;
        }
        pi_estimates[i] = 4.0 * (float)count / (float)samples;
    }
}

// CPU Monte Carlo reference estimate of pi over n samples.
float cpu_estimate_pi(long long n)
{
    float x, y, z;
    int count = 0;   // bug fix: was uninitialized, producing garbage estimates

    for (int iter = 0; iter < n; iter++) {
        // Generate random (X,Y) points
        x = (float)random() / (float)RAND_MAX;
        y = (float)random() / (float)RAND_MAX;
        z = sqrt((x*x) + (y*y));

        // Check if point is in unit circle
        if (z <= 1.0) {
            count++;
        }
    }
    return ((float)count / (float)n) * 4.0;
}

int main(int argc, char* argv[])
{
    // argv: [1] total samples (CPU), [2] samples per GPU thread,
    //       [3] threads per block.
    // bug fix: atoll instead of atoi so large counts are not truncated to int
    long long samples = std::atoll(argv[1]),
              samples_per_thread = std::atoll(argv[2]),
              thread_per_block = std::atoll(argv[3]);

    auto start = std::chrono::steady_clock::now();
    float pi = cpu_estimate_pi(samples);
    auto end = std::chrono::steady_clock::now();
    std::cout << "CPU-PI: " << pi << std::endl;
    std::cout << "CPU " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << std::endl;

    long long blocks = (samples + thread_per_block - 1) / (thread_per_block * samples_per_thread) + 1;
    long long total_threads = blocks * thread_per_block;

    curandState *rng;
    cudaMalloc(&rng, sizeof(curandState) * total_threads);
    setup_gpu_rng<<<blocks, thread_per_block>>>(total_threads, rng, time(NULL));

    float *device_pi_estimates, *host_pi_estimates;
    cudaMalloc(&device_pi_estimates, sizeof(float) * total_threads);
    host_pi_estimates = (float*)malloc(sizeof(float) * total_threads);

    start = std::chrono::steady_clock::now();
    gpu_estimate_pi<<<blocks, thread_per_block>>>(total_threads, rng, samples_per_thread, device_pi_estimates);
    // blocking memcpy on the default stream also synchronizes with the kernel
    cudaMemcpy(host_pi_estimates, device_pi_estimates, sizeof(float) * total_threads, cudaMemcpyDeviceToHost);

    // average the per-thread estimates
    float gpu_pi = 0;
    for (long long i = 0; i < total_threads; i++) {
        gpu_pi += host_pi_estimates[i];
    }
    gpu_pi /= (float)total_threads;
    end = std::chrono::steady_clock::now();
    std::cout << "GPU-PI: " << gpu_pi << std::endl;
    std::cout << "GPU " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << std::endl;

    // release resources (were leaked in the original)
    cudaFree(rng);
    cudaFree(device_pi_estimates);
    free(host_pi_estimates);
    return 0;
}
22,066
#include "io.cuh"

// Load the MNIST training and test sets from the IDX files under ../data
// into the global arrays train_label/train_image/test_label/test_image
// (presumably declared in io.cuh — confirm). Pixel values are normalized to
// [0, 1]. IDX files store 32-bit big-endian integers, hence swap_endian().
// NOTE(review): fread return values are not checked; a short/corrupt file
// would silently yield garbage data.
void load_data()
{
    FILE *f_images=fopen("../data/train-images.idx3-ubyte","rb");
    FILE *f_labels=fopen("../data/train-labels.idx1-ubyte","rb");

    int tmp;
    int magic_num;
    // magic numbers are read but not validated
    fread(&magic_num,sizeof(int),1,f_images);
    fread(&magic_num,sizeof(int),1,f_labels);
    // printf("debug:%d\n",swap_endian(magic_num));

    // item count (same in both files for a consistent data set)
    int train_size;
    fread(&train_size,sizeof(int),1,f_images);
    fread(&train_size,sizeof(int),1,f_labels);
    train_size=swap_endian(train_size);
    // printf("debug:%d\n",swap_endian(train_size));

    // image dimensions (28x28 for MNIST)
    int rows,cols;
    fread(&rows,sizeof(int),1,f_images);
    fread(&cols,sizeof(int),1,f_images);
    rows=swap_endian(rows);
    cols=swap_endian(cols);
    // printf("debug:%d\n",swap_endian(rows));
    // printf("debug:%d\n",swap_endian(cols));

    // labels: one byte each
    for(int i=0;i<train_size;i++)
    {
        fread(&train_label[i],1,1,f_labels);
        if(i%1000==0)
            printf("Training labels : Already read %5d labels\r",i);
        // printf("%d:debug:%d\r",i,train_label[i]);
        // system("pause");
    }
    printf("Training labels : Already read %5d labels\n",train_size);

    // images: one byte per pixel, normalized by 255 into floats
    for(int i=0;i<train_size;i++)
    {
        for(int j=0;j<rows;j++)
            for(int k=0;k<cols;k++)
            {
                // tmp is zeroed first because only 1 byte of the int is read
                tmp=0;
                fread(&tmp,1,1,f_images);
                train_image[i][j][k]=tmp;
                train_image[i][j][k]/=255;
                // printf("%d %d %d debug: %f\n",i,j,k,train_image[i][j][k]);
                // system("pause");
            }
        if(i%1000==0)
            printf("Training images : Already read %5d images\r",i);
    }
    printf("Training images : Already read %5d images\n",train_size);

    fclose(f_images);
    fclose(f_labels);

    // same layout for the 10k-item test set
    f_images=fopen("../data/t10k-images.idx3-ubyte","rb");
    f_labels=fopen("../data/t10k-labels.idx1-ubyte","rb");

    fread(&magic_num,sizeof(int),1,f_images);
    fread(&magic_num,sizeof(int),1,f_labels);

    int test_size;
    fread(&test_size,sizeof(int),1,f_images);
    fread(&test_size,sizeof(int),1,f_labels);
    test_size=swap_endian(test_size);

    fread(&rows,sizeof(int),1,f_images);
    fread(&cols,sizeof(int),1,f_images);
    rows=swap_endian(rows);
    cols=swap_endian(cols);

    for(int i=0;i<test_size;i++)
    {
        fread(&test_label[i],1,1,f_labels);
        if(i%1000==0)
            printf("Testing labels : Already read %5d labels\r",i);
    }
    printf("Testing labels : Already read %5d labels\n",test_size);

    for(int i=0;i<test_size;i++)
    {
        for(int j=0;j<rows;j++)
            for(int k=0;k<cols;k++)
            {
                tmp=0;
                fread(&tmp,1,1,f_images);
                test_image[i][j][k]=tmp;
                test_image[i][j][k]/=255;
            }
        if(i%1000==0)
            printf("Testing images : Already read %5d images\r",i);
    }
    printf("Testing images : Already read %5d images\n\n",test_size);

    fclose(f_images);
    fclose(f_labels);
}

// Dump the trained network parameters (conv/fc weights and biases, globals
// presumably declared in io.cuh) to ./params.txt. Each float is written as
// the hexadecimal image of its bit pattern via the *(int*)& type-pun, so the
// dump is exact but not human-readable.
void export_params()
{
    FILE *f_params=fopen("./params.txt","w");
    fprintf(f_params,"6\n");   // number of parameter sections that follow

    fprintf(f_params,"conv1bias 0 6 ");
    for(int i=0;i<CONV_W_NUM;i++)
        fprintf(f_params,"%X ", *(int *)&conv_b[i]);
    fprintf(f_params,"\n");

    fprintf(f_params,"conv1filter 0 150 ");
    for(int i=0;i<CONV_W_NUM;i++)
        for(int j=0;j<CONV_W_SIZE;j++)
            for(int k=0;k<CONV_W_SIZE;k++)
                fprintf(f_params,"%X ", *(int *)&conv_w[i][j][k]);
    fprintf(f_params,"\n");

    fprintf(f_params,"ip1bias 0 45 ");
    for(int i=0;i<FC1_SIZE;i++)
        fprintf(f_params,"%X ", *(int *)&fc1_b[i]);
    fprintf(f_params,"\n");

    fprintf(f_params,"ip1filter 0 38880 ");
    for(int i=0;i<FC1_SIZE;i++)
        for(int j=0;j<CONV_W_NUM;j++)
            for(int k=0;k<POOL_SIZE;k++)
                for(int l=0;l<POOL_SIZE;l++)
                    fprintf(f_params,"%X ", *(int *)&fc1_w[i][j][k][l]);
    fprintf(f_params,"\n");

    fprintf(f_params,"ip2bias 0 10 ");
    for(int i=0;i<FC2_SIZE;i++)
        fprintf(f_params,"%X ", *(int *)&fc2_b[i]);
    fprintf(f_params,"\n");

    fprintf(f_params,"ip2filter 0 450 ");
    for(int i=0;i<FC2_SIZE;i++)
        for(int j=0;j<FC1_SIZE;j++)
            fprintf(f_params,"%X ", *(int *)&fc2_w[i][j]);

    fclose(f_params);
}
22,067
#include "includes.h" static __device__ float E = 2.718281828; __global__ void transformBboxSQDKernel(float *delta, float *anchor, float *res, int block_size) { int di = (blockIdx.x * block_size + threadIdx.x) * 4; float d[4] = {delta[di], delta[di+1], delta[di+2], delta[di+3]}; float a[4] = {anchor[di], anchor[di+1], anchor[di+2], anchor[di+3]}; float cx = a[0] + d[0] * a[2]; float cy = a[1] + d[1] * a[3]; float w = a[2] * (d[2] < 1 ? expf(d[2]) : d[2] * E); float h = a[3] * (d[3] < 1 ? expf(d[3]) : d[3] * E); res[di] = cx - w * 0.5; res[di+1] = cy - h * 0.5; res[di+2] = cx + w * 0.5; res[di+3] = cy + h * 0.5; }
22,068
__global__ void multiplyArray(const int* matches, const int* counts, const int N, const int M, int* counts_out){ /* Embarrassingly simple parallel multiply where output and first matrix are 1D Inputs: matches: N length array of matches (ideally ones and zeros) counts: N*M length array of counts (integers) N: listLenght M: numFacies Input/Output: counts_out: Result of element-wise multiplication of 'a' with each "row" of 'b'. */ unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; // For length of list do if(x < N){ for(int y = 0; y < M; y++){ counts_out[N*y + x] = matches[x] * counts[N*y + x]; } } }
22,069
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <chrono>
#include <cmath>     // bug fix: pow() was used without including any math header
#include <fstream>
using namespace std::chrono;

// number of histogram bins (note: 2024 — kept as in the original benchmark)
#define nBins 2024

// Shared-memory histogram: each block accumulates a private histogram in
// shared memory, then publishes it to bins[blockIdx.x*nBins .. +nBins).
__global__ void hist_device(const int *input, int *bins, int N)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;

    __shared__ unsigned int hist[nBins];
    for (int i = threadIdx.x; i < nBins; i += blockDim.x) {
        hist[i] = 0;
    }
    __syncthreads();

    // grid-stride loop over the input
    for (int i = index; i < N; i += stride) {
        atomicAdd(&hist[input[i]], 1);
    }
    __syncthreads();

    // publish this block's partial histogram
    for (int i = threadIdx.x; i < nBins; i += blockDim.x) {
        bins[i + blockIdx.x * nBins] = hist[i];
    }
}

// Reduce the per-block partial histograms into bins[0 .. nBins).
// (`input` and `N` are unused; the signature mirrors the other kernels.)
__global__ void hist_device_saturation(const int *input, int *bins, int N)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;

    for (int i = index; i < nBins; i += stride) {
        unsigned int sum = 0;
        for (int k = 0; k < gridDim.x; k++) {
            sum += bins[i + k * nBins];
        }
        bins[i] = sum;
    }
}

// Baseline: one global-memory atomic per element.
__global__ void hist_device_simple(const int *input, int *bins, int N)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;

    for (int i = index; i < N; i += stride) {
        atomicAdd(&bins[input[i]], 1);
    }
}

// CPU reference histogram.
void hist_host(const int *input, int *bins, int N)
{
    for (int i = 0; i < N; i++) {
        bins[input[i]]++;
    }
}

int main()
{
    srand(time(NULL));

    int deviceId;
    int numberOfSMs;
    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);

    int *bins;
    int *input;
    int threads_per_block = 256;
    int number_of_blocks = 32 * numberOfSMs;

    std::ofstream host_file, device_file, device_simple_file;
    host_file.open ("histogram_host.txt");
    device_file.open ("histogram_device.txt");
    device_simple_file.open ("histogram_device_simple.txt");

    // benchmark N = 10^3 .. 10^9, 10 repetitions each
    for (int it = 3; it <= 9; it++) {
        const int N = pow(10, it);
        std::cout << "\nN: " << N << std::endl;
        size_t size = N * sizeof (int);
        size_t size_bins = number_of_blocks * nBins * sizeof (int);
        cudaMallocManaged(&input, size);
        cudaMallocManaged(&bins, size_bins);
        host_file << N;
        device_file << N;
        device_simple_file << N;

        for (int k = 0; k < 10; k++) {
            std::cout << "It: " << k + 1 << std::endl;
            for (int i = 0; i < nBins; i++) {
                bins[i] = 0;
            }
            for (int i = 0; i < N; i++) {
                input[i] = rand() % nBins;
            }

            auto start = high_resolution_clock::now();
            hist_host(input, bins, N);
            auto stop = high_resolution_clock::now();
            auto duration = duration_cast<microseconds>(stop - start);
            std::cout << "Host time: " << duration.count() << std::endl;
            std::cout << "Last bin count: " << bins[nBins - 1] << std::endl;
            host_file << "\t" << duration.count();

            //reset histogram
            for (int i = 0; i < nBins; i++) {
                bins[i] = 0;
            }

            // prefetch to the GPU so page faults don't pollute the timing
            cudaMemPrefetchAsync(input, size, deviceId);
            cudaMemPrefetchAsync(bins, size_bins, deviceId);
            start = high_resolution_clock::now();
            hist_device <<< number_of_blocks, threads_per_block>>> (input, bins, N);
            hist_device_saturation <<< number_of_blocks, threads_per_block>>> (input, bins, N);
            cudaDeviceSynchronize();
            stop = high_resolution_clock::now();
            duration = duration_cast<microseconds>(stop - start);
            std::cout << "Device time: " << duration.count() << std::endl;
            cudaMemPrefetchAsync(bins, size_bins, cudaCpuDeviceId);
            cudaMemPrefetchAsync(input, size, cudaCpuDeviceId);
            std::cout << "Last bin count: " << bins[nBins - 1] << std::endl;
            device_file << "\t" << duration.count();
            cudaDeviceSynchronize();

            //reset histogram
            for (int i = 0; i < nBins; i++) {
                bins[i] = 0;
            }

            cudaMemPrefetchAsync(input, size, deviceId);
            cudaMemPrefetchAsync(bins, size_bins, deviceId);
            start = high_resolution_clock::now();
            hist_device_simple <<< number_of_blocks, threads_per_block>>> (input, bins, N);
            cudaDeviceSynchronize();
            stop = high_resolution_clock::now();
            duration = duration_cast<microseconds>(stop - start);
            std::cout << "Device simple time: " << duration.count() << std::endl;
            cudaMemPrefetchAsync(bins, size_bins, cudaCpuDeviceId);
            cudaMemPrefetchAsync(input, size, cudaCpuDeviceId);
            std::cout << "Last bin count: " << bins[nBins - 1] << std::endl;
            device_simple_file << "\t" << duration.count();
            std::cout << std::endl;
        }
        host_file << std::endl;
        device_file << std::endl;
        device_simple_file << std::endl;
        cudaFree(input);
        cudaFree(bins);
    }

    host_file.close();
    device_file.close();
    device_simple_file.close();   // bug fix: this stream was never closed
    return 0;
}
22,070
/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
 */

#include "../../XDevice.h"
#include "../../XTensor.h"
#include "MatrixMul2D.h"
#include "MatrixMul2D.cuh"
#include "XTensorBLAS.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

#ifdef USE_CUDA

/*
mutilication of a dense matrix with a sparse matrix
c = a * b * \alpha
>> a - a dense matrix
>> transposedA - indicates whether a is transposed
>> aColSize - column size of matrix a
>> aRowSize - row size of matrix a
>> b - a sparse matrix; stored as (key, value) tuples of (int, DTYPE),
       where key encodes (row, col) as row * bRowSize + col
>> transposedB - indicates whether b is transposed
>> bNonZeroNum - number of non-zero items in b
>> bColSize - column size of matrix b
>> bRowSize - row size of matrix b
>> c - the resulting (dense) matrix
>> cColSize - column size of matrix c
>> cRowSize - row size of matrix c
>> alpha - the scaling factor

One thread per output row i; the sparse tuples are streamed through shared
memory in chunks of blockDim.x. Both __syncthreads() calls sit in uniform
control flow (the k-loop bounds do not depend on threadIdx).
*/
__global__
void KernelMatrixMulDenseMSparseMV2(DTYPE * a, MATRIX_TRANS_TYPE transposedA, int aColSize, int aRowSize,
                                    void * b, MATRIX_TRANS_TYPE transposedB, int bNonZeroNum, int bColSize, int bRowSize,
                                    DTYPE * c, int cColSize, int cRowSize, DTYPE alpha)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    /* the tuple stream starts right after the non-zero count header
       stripped by the caller */
    char * bData = (char*)b;
    int tupleSize = sizeof(int) + sizeof(DTYPE);

    for (int k = 0; k < bNonZeroNum; k += blockDim.x) {
        __shared__ int bEntryRow[MAX_CUDA_THREAD_NUM_PER_BLOCK];
        __shared__ int bEntryCol[MAX_CUDA_THREAD_NUM_PER_BLOCK];
        __shared__ DTYPE bValue[MAX_CUDA_THREAD_NUM_PER_BLOCK];

        if (k + threadIdx.x < bNonZeroNum) {
            /* load the sub-block of the sparse matrix b */
            int key = *(int*)(bData + tupleSize * (k + threadIdx.x));
            bEntryRow[threadIdx.x] = key / bRowSize;
            bEntryCol[threadIdx.x] = key % bRowSize;
            bValue[threadIdx.x] = *(DTYPE*)(bData + tupleSize * (k + threadIdx.x) + sizeof(int));
        }

        /* synchronize to make sure the sub-block of the sparse matrix b is loaded */
        __syncthreads();

        if (i < cColSize) {
            /* each transpose combination only changes how a is indexed and
               where the product lands in c; the sparse chunk is identical */
            if (transposedA == X_NOTRANS && transposedB == X_NOTRANS) {
                for (int m = 0; m < blockDim.x && k + m < bNonZeroNum; m++) {
                    DTYPE * aRow = a + aRowSize * i;
                    c[i * cRowSize + bEntryCol[m]] += aRow[bEntryRow[m]] * bValue[m] * alpha;
                }
            }
            else if (transposedA == X_TRANS && transposedB == X_NOTRANS) {
                for (int m = 0; m < blockDim.x && k + m < bNonZeroNum; m++) {
                    DTYPE * aCol = a + i;
                    c[i * cRowSize + bEntryCol[m]] += aCol[bEntryRow[m] * aRowSize] * bValue[m] * alpha;
                }
            }
            else if (transposedA == X_NOTRANS && transposedB == X_TRANS) {
                for (int m = 0; m < blockDim.x && k + m < bNonZeroNum; m++) {
                    DTYPE * aRow = a + aRowSize * i;
                    c[i * cRowSize + bEntryRow[m]] += aRow[bEntryCol[m]] * bValue[m] * alpha;
                }
            }
            else if (transposedA == X_TRANS && transposedB == X_TRANS) {
                for (int m = 0; m < blockDim.x && k + m < bNonZeroNum; m++) {
                    DTYPE * aCol = a + i;
                    c[i * cRowSize + bEntryRow[m]] += aCol[bEntryCol[m] * aRowSize] * bValue[m] * alpha;
                }
            }
        }

        /* synchronize so the preceding computation is done before loading new sub-blocks */
        __syncthreads();
    }
}

/*
matrix multiplication (for 2d tensors) (cuda version)

c = trans(a) * trans(b) * alpha + c * beta
where trans() return the transposed matrix if the flag is fired

>> a - tensor a
>> transposedA - indicates whether the matrices in a are transposed
>> b - tensor b
>> transposedB - indicates whether teh matrices in b are transposed
>> c - where we put a*b
>> alpha - a coefficient
>> beta - another coefficient
*/
void _CudaMatrixMul2D(const XTensor * a, MATRIX_TRANS_TYPE transposedA,
                      const XTensor * b, MATRIX_TRANS_TYPE transposedB,
                      XTensor * c, DTYPE alpha, DTYPE beta)
{
    int an = transposedA == X_TRANS ? a->dimSize[1] : a->dimSize[0];
    int am = transposedA == X_TRANS ? a->dimSize[0] : a->dimSize[1];
    int bn = transposedB == X_TRANS ? b->dimSize[1] : b->dimSize[0];
    int bm = transposedB == X_TRANS ? b->dimSize[0] : b->dimSize[1];
    int cn = c->dimSize[0];
    int cm = c->dimSize[1];

    CheckNTErrors((a && b && c), "Empty matrices in multiplication!");
    CheckNTErrors((am == bn && an == cn && bm == cm),
                  "Unmatched matrices in multiplication!");
    CheckNTErrors((a->devID >= 0), "Cuda version matrix mutiplication must be run on GPUs.");
    CheckNTErrors(a->devID == b->devID && a->devID == c->devID,
                  "Matrices used in multiplication are not on the same GPU.");

    int devIDBackup = 0;
    ProtectCudaDev(a->devID, devIDBackup);

    /* a dense matrix multiply a dense matrix: delegate to cuBLAS */
    if (!a->isSparse && !b->isSparse) {
        CheckNTErrors((!c->isSparse), "Illegal use of sparse matrix in multiplication!");

        cublasHandle_t * handle = a->mem == NULL ? GDevs.GetCudaHandle(a->devID) : a->mem->GetCublasHandle();

        /* c is additive (c = a*b*alpha + c*beta), so clear it when beta == 0 */
        if (beta == 0)
            c->SetZeroAll();

        if ((a->dataType == X_FLOAT && b->dataType == X_FLOAT && c->dataType == X_FLOAT) ||
            (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16 && c->dataType == X_FLOAT16))
        {
            _CudaBLASMatrixMUL(handle, a->data, transposedA, a->dataType,
                               b->data, transposedB, a->dataType,
                               c->data, c->dataType,
                               a->dimSize[0], a->dimSize[1],
                               b->dimSize[0], b->dimSize[1],
                               c->dimSize[0], c->dimSize[1],
                               alpha, beta);
        }
        else {
            // TODO!!
            ShowNTErrors("TODO!");
        }
    }
    /* a dense matrix multiply a sparse matrix: custom kernel */
    else if (!a->isSparse && b->isSparse) {

        CheckNTErrors(!c->isSparse, "Illegal use of sparse matrix in multiplication!");
        CheckNTErrors((beta == 0 || beta == 1.0), "beta must be 0 or 1.");

        if (a->dataType == DEFAULT_DTYPE &&
            b->dataType == DEFAULT_DTYPE &&
            c->dataType == DEFAULT_DTYPE)
        {
            int gridSize[3], blockSize[3];

            GDevs.GetCudaThread(c->devID, a->dimSize[0], gridSize, blockSize);

            dim3 blocks(gridSize[0]);
            dim3 threads(blockSize[0]);

            /* skip the leading int holding the tuple count */
            void * bData = (void*)((char*)b->data + sizeof(int));

            if (beta == 0)
                c->SetZeroAll();
            else if (beta != 1.0F) {
                ShowNTErrors("TODO!");
            }

            KernelMatrixMulDenseMSparseMV2 << <blocks, threads >> >((DTYPE*)a->data, transposedA, a->dimSize[0], a->dimSize[1],
                                                                    bData, transposedB, b->unitNumNonZero, b->dimSize[0], b->dimSize[1],
                                                                    (DTYPE*)c->data, c->dimSize[0], c->dimSize[1], alpha);
        }
        else {
            // TODO!!
            ShowNTErrors("TODO!");
        }
    }
    else {
        // TODO!!
        ShowNTErrors("TODO!");
    }

    BacktoCudaDev(a->devID, devIDBackup);
}

#endif // USE_CUDA

} // namespace nts(NiuTrans.Tensor)
22,071
/*
  @autor José Arcos Aneas

  Sum of two vectors read from files whose first line holds the number of
  elements to read. The file names are passed as command-line arguments.
  (Comments translated to English.)
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sys/time.h>

using namespace std;

// Kernel: each thread of the single block computes one element
// C[i] = A[i] + B[i].
__global__ void VecAdd(double* A, double* B, double* C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

int main(int argc, char* argv[])
{
    // Input files passed on the command line; no need to recompile for
    // every new data file.
    const char *fichero1 = argv[1];
    const char *fichero2 = argv[2];
    ifstream archivo1(fichero1);
    ifstream archivo2(fichero2);

    // variables for timing
    struct timeval stop, start;

    // read the element counts (first value of each file)
    int filas, residuo;
    archivo2 >> residuo;
    archivo1 >> filas;
    cout << filas << "\n";

    // read the file contents
    float aux1, aux2 = 0.0;
    int i = 0;
    float lista1[filas];
    float lista2[filas];
    for (i = 0; i < filas; i++) {
        archivo1 >> aux1;
        archivo2 >> aux2;
        lista1[i] = aux1;
        lista2[i] = aux2;
    }

    size_t size = filas * sizeof(double);
    double* h_A = (double *)malloc(size);
    double* h_B = (double *)malloc(size);
    double* h_C = (double *)malloc(size);
    for (i = 0; i < filas; i++) {
        h_A[i] = lista1[i];
        h_B[i] = lista2[i];
    }

    // allocate device memory and copy the inputs over
    double *d_A;
    cudaMalloc((void**)&d_A, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    double *d_B;
    cudaMalloc((void**)&d_B, size);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    double *d_C;
    cudaMalloc((void**)&d_C, size);

    // NOTE: a single block is launched, so 'filas' must not exceed the
    // per-block thread limit (1024 on current GPUs).
    dim3 dimblock(filas);

    // launch the kernel and time it
    gettimeofday(&start, NULL);
    VecAdd<<<1, dimblock>>>(d_A, d_B, d_C);
    // Bug fix: kernel launches are asynchronous, so without this sync the
    // timer only measured the launch overhead, not the kernel execution.
    cudaDeviceSynchronize();
    gettimeofday(&stop, NULL);

    // copy the result back to the host
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    // print the resulting sum
    for (i = 0; i < filas; i++) {
        printf("%lf ", h_C[i]);
    }

    // release memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);

    // show the elapsed time in microseconds of the tv_usec fields
    cout << "\nCodigo ejecutado en (ms) " << stop.tv_usec - start.tv_usec;
    return 0;
}
22,072
#include "cuda.h"
#include "stdio.h"

// Device-side greeter: every launched thread prints one line.
__global__ void hello()
{
    printf("Hello world from GPU\n");
}

int main()
{
    // One block of five threads -> five "Hello world" lines.
    hello<<<1, 5>>>();

    // Tear down the device context for this process.  Without a
    // synchronizing call such as cudaDeviceReset() the host process can
    // exit before the GPU has flushed any output.
    cudaDeviceReset();

    return 0;
}
22,073
// includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>

//------------- Velocity-sum kernel
// One thread per (node x, neighbour y) pair accumulates that neighbour's
// distribution functions into the node's sum array.
//   pdist : distribution functions, ndist entries per node
//   pvec  : neighbour table, nvec neighbour indices per node
//   psum  : output accumulator, ndist entries per node
//   node  : number of nodes
__global__ void sumarvelocidad(float * pdist, int * pvec, float * psum, int node)
{
    int nvec = 9;   // number of neighbours
    int ndist = 9;  // number of distribution functions
    int x = threadIdx.x + blockIdx.x * blockDim.x;  // node index
    int y = threadIdx.y + blockIdx.y * blockDim.y;  // neighbour index

    if (x < node) {      // parallel over nodes
        if (y < nvec) {  // parallel over neighbours
            for (int k = 0; k < ndist; k++) {
                // All nvec threads of one node target the same psum slot,
                // so a plain "+=" is a data race that drops updates;
                // atomicAdd makes the accumulation correct.
                atomicAdd(&psum[x * ndist + k],
                          pdist[pvec[x * nvec + y] * ndist + k]);
            }
        }
    }
}
// node == x
// neighbour == y
// velocity == k
22,074
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <algorithm>
using namespace std;
using uchar = unsigned char;

// 3x3 box blur over a 24-bit BMP pixel array (BGR triplets), one thread per
// pixel; edge pixels clamp their neighbourhood to the image border.
// NOTE(review): the indexing assumes BMP rows carry no 4-byte padding,
// i.e. width*3 is already a multiple of 4 -- confirm for arbitrary images.
__global__ void kernel(uchar* data, uchar* new_data, unsigned height, unsigned width)
{
    const float matr[3][3] = {
        {0.11111f, 0.11111f, 0.11111f},
        {0.11111f, 0.11111f, 0.11111f},
        {0.11111f, 0.11111f, 0.11111f}
    };
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // row
    int j = blockIdx.y * blockDim.y + threadIdx.y;  // column
    if (i < height && j < width) {
        float resB = 0.f, resG = 0.f, resR = 0.f;
        for (int di : {-1, 0, 1}) {
            for (int dj : {-1, 0, 1}) {
                int ni = max(0, min(i + di, (int)height - 1));
                int nj = max(0, min(j + dj, (int)width - 1));
                // Accumulate in float.  The original cast every term through
                // (char), which truncated the fraction and risked signed
                // overflow before the sum was even formed.
                resB += matr[1 + di][1 + dj] * data[(ni * width + nj) * 3];
                resG += matr[1 + di][1 + dj] * data[(ni * width + nj) * 3 + 1];
                resR += matr[1 + di][1 + dj] * data[(ni * width + nj) * 3 + 2];
            }
        }
        // Clamp and store as unsigned char.  Casting a value > 127 through
        // signed char (as before) is implementation-defined.
        new_data[(i * width + j) * 3]     = (uchar)min(255.f, max(0.f, resB));
        new_data[(i * width + j) * 3 + 1] = (uchar)min(255.f, max(0.f, resG));
        new_data[(i * width + j) * 3 + 2] = (uchar)min(255.f, max(0.f, resR));
    }
}

int main(){
    ifstream in("picture.bmp", ios::in|ios::binary);
    ofstream out("new_picture.bmp", ofstream::binary);
    uchar *picture, *new_picture;

    // Mapped pinned host memory: the kernel accesses these buffers directly.
    cudaSetDeviceFlags(cudaDeviceMapHost);
    cudaHostAlloc(&picture, 50*1024*1024, cudaHostAllocMapped);
    cudaHostAlloc(&new_picture, 50*1024*1024, cudaHostAllocMapped);

    // Read the whole BMP, mirroring it into the output buffer so the header
    // (and any trailing bytes) are preserved verbatim.
    int len = 0;
    while (in.read((char*)picture+len, 1))
        new_picture[len] = picture[len], len++;

    // BMP header fields: pixel-data offset, width, height.
    unsigned begin  = *(unsigned*)(picture + 10);
    unsigned width  = *(unsigned*)(picture + 18);
    unsigned height = *(unsigned*)(picture + 22);
    uchar* data     = picture + begin;
    uchar* new_data = new_picture + begin;

    // 32x32 blocks; grid x covers rows, grid y covers columns (matches the
    // kernel's i=row-from-x, j=col-from-y convention).
    dim3 block(32, 32), numBlock((height+31)/32, (width+31)/32);
    kernel <<<numBlock, block>>> (data, new_data, height, width);
    cudaDeviceSynchronize();

    for (int i = 0; i < len; i++)
        out << new_picture[i];

    // Release the pinned allocations (the original leaked both).
    cudaFreeHost(picture);
    cudaFreeHost(new_picture);
    return 0;
}
22,075
// fermi
/*
 * Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Packed 8-bit RGB pixel.
typedef struct __align__(1) {
    unsigned char r;
    unsigned char g;
    unsigned char b;
} Color;

extern "C" {
__global__ void grayscaleKernel(const int n, float* output, const Color* input);
}

// Luma conversion: output[i] = 0.299 R + 0.587 G + 0.114 B.
// Thread layout: blockDim.y groups of blockDim.x lanes are flattened into a
// per-block index ti; each block covers up to 1024 consecutive pixels.
__global__ void grayscaleKernel(const int n, float* output, const Color* input)
{
    const int bi = blockIdx.x;
    const int wti = threadIdx.y;
    const int tti = threadIdx.x;
    const int nrThreadsN = min(1024, n);
    const int nrThreadsNrThreadsN = min(32, nrThreadsN);
    const int ti = wti * (1 * nrThreadsNrThreadsN) + tti;
    if (ti < nrThreadsN) {
        const int i = bi * (1 * nrThreadsN) + ti;
        if (i < n) {
            const float r = (float) input[i].r;
            const float g = (float) input[i].g;
            const float b = (float) input[i].b;
            // Float literals: the original double constants promoted the
            // whole expression to double precision for no accuracy benefit.
            output[i] = 0.299f * r + 0.587f * g + 0.114f * b;
        }
    }
}
22,076
#include<iostream>
#include<math.h>
#include<cooperative_groups.h>
using namespace cooperative_groups;

int numBlocks = 40;
int blockSize = 256;

// Tree reduction of `val` across thread group `g`, using `temp` (shared
// memory, one float per thread of the group) as scratch.  Returns the group
// total in thread rank 0 (other ranks hold partial sums).
__device__ float reduce_sum(thread_group g, float *temp, float val){
    for (int i = g.size()/2; i > 0; i /= 2){
        // Publish each thread's running value into shared memory.
        temp[g.thread_rank()] = val;
        // Barrier before reading: prevents the write/read race.
        g.sync();
        if (g.thread_rank() < i) { val += temp[g.thread_rank() + i]; }
        g.sync();
    }
    return val;
}

// Grid-stride partial sum over `input`.  float4 loads move 16 bytes per
// iteration; the original reinterpreted the float data as int4 and summed
// the raw bit patterns as integers, producing garbage.
__device__ float block_sum(float *input, int n){
    float sum = 0;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n/4; i += stride){
        float4 in = ((float4*) input)[i];
        sum += in.x + in.y + in.z + in.w;
    }
    // Tail elements when n is not a multiple of 4.
    for (int i = n/4*4 + index; i < n; i += stride)
        sum += input[i];
    return sum;
}

// Sums `input` into *sum: per-thread grid-stride partials, block-level
// shared-memory reduction, then one atomicAdd per block.
// Requires blockDim.x * sizeof(float) bytes of dynamic shared memory.
__global__ void sum_kernel(float *sum, float *input, int n){
    float my_sum = block_sum(input, n);

    // Dynamic shared memory, sized at launch.  The original commented this
    // declaration out, leaving `temp` undefined.
    extern __shared__ float temp[];
    auto g = this_thread_block();
    float total = reduce_sum(g, temp, my_sum);

    if (g.thread_rank() == 0) { atomicAdd(sum, total); }
}

int main(void){
    int n = 1 << 10;
    int blockSize = 256;
    int numBlocks = (n + blockSize - 1) / blockSize;

    float *input, *sum;
    cudaMallocManaged(&input, n * sizeof(float));
    cudaMallocManaged(&sum, sizeof(float));
    *sum = 0.0f;  // cudaMallocManaged does not zero the allocation

    for (int i = 0; i < n; i++){
        input[i] = (rand() % 100) / 100.0;
    }

    // Third launch argument = dynamic shared-memory bytes for `temp`.
    sum_kernel <<<numBlocks, blockSize, blockSize * sizeof(float)>>>(sum, input, n);

    // The launch is asynchronous: synchronize before the host reads *sum.
    cudaDeviceSynchronize();
    printf("the final sum: %f", *sum);
}
22,077
#include "includes.h"

// Per-pixel weight of an Lp data term, one thread per pixel.
//   weightdata : output, one weight per pixel
//   I          : scalar image (nPixels entries)
//   input      : interleaved multi-channel image (nPixels * nChannels)
//   c          : channel of `input` compared against I
//   norm_for_data_term : p of the Lp norm (2 -> constant, 1 -> 1/|r|,
//                        otherwise |r|^(p-2))
//   eps        : regulariser keeping the weight finite at r == 0
__global__ void Compute_weightdata_Kernel(float* weightdata, const float* I, const float* input, int nPixels, int nChannels, int c, float norm_for_data_term, float eps)
{
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int x = bx*blockDim.x + tx;
    if (x >= nPixels) return;

    // Regularised residual between I and channel c of `input`.
    // fabsf: single-precision, avoids an implicit promotion to double.
    float r = fabsf(I[x] - input[x*nChannels + c]) + eps;

    if (norm_for_data_term == 2) {
        // L2: unit weight.
        weightdata[x] = 1;
    } else if (norm_for_data_term == 1) {
        // L1: inverse residual.
        weightdata[x] = 1.0f / r;
    } else {
        // General Lp: |r|^(p-2).  powf replaces the double-precision pow()
        // the original used inside this all-float kernel.
        weightdata[x] = powf(r, norm_for_data_term - 2);
    }
}
22,078
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
#include<cuda.h>
#include<cuda_runtime.h>

#define GLOBAL_N 10
const int M = (1 << GLOBAL_N), N = (1 << GLOBAL_N), K = (1 << GLOBAL_N); /* matrix size */
#define BLOCK_SIZE (1 << 3) /* thread block size */

// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
    int width;
    int height;
    int stride;
    float* elements;
} Matrix;

// Get a matrix element.
__device__ float GetElement(const Matrix A, int row, int col)
{
    return A.elements[row * A.stride + col];
}

// Set a matrix element.
__device__ void SetElement(Matrix A, int row, int col, float value)
{
    A.elements[row * A.stride + col] = value;
}

// Get the BLOCK_SIZE x BLOCK_SIZE sub-matrix Asub of A that is located
// `col` sub-matrices to the right and `row` sub-matrices down from the
// upper-left corner of A.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix Asub;
    Asub.width = BLOCK_SIZE;
    Asub.height = BLOCK_SIZE;
    Asub.stride = A.stride;
    Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
    return Asub;
}

// Forward declarations of the matrix multiplication kernels.
__global__ void MatMulKernel_Shared(const Matrix A, const Matrix B, Matrix C);
__global__ void MatMulKernel(const Matrix A, const Matrix B, Matrix C);

// Host driver for the shared-memory kernel.
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE.
void MatMul_Shared(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory.
    Matrix d_A;
    d_A.width = d_A.stride = A.width;
    d_A.height = A.height;
    {
        size_t size = A.width * A.height * sizeof(float);
        cudaMalloc(&d_A.elements, size);
        cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    }
    Matrix d_B;
    d_B.width = d_B.stride = B.width;
    d_B.height = B.height;
    {
        size_t size = B.width * B.height * sizeof(float);
        cudaMalloc(&d_B.elements, size);
        cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    }
    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = d_C.stride = C.width;
    d_C.height = C.height;
    {
        size_t size = C.width * C.height * sizeof(float);
        cudaMalloc(&d_C.elements, size);
    }

    // Invoke kernel: one thread per output element, one block per tile.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernel_Shared<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);

    // Read C from device memory (cudaMemcpy synchronizes with the kernel).
    {
        size_t size = C.width * C.height * sizeof(float);
        cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    }

    // Free device memory.
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Tiled matrix multiplication kernel: each block computes one
// BLOCK_SIZE x BLOCK_SIZE tile of C, staging tiles of A and B through
// shared memory.
__global__ void MatMulKernel_Shared(Matrix A, Matrix B, Matrix C)
{
    // Block row and column.
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;

    // Each thread block computes one sub-matrix Csub of C.
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);

    // Each thread accumulates one element of Csub in Cvalue.
    float Cvalue = 0;

    // Thread row and column within Csub.
    int row = threadIdx.y;
    int col = threadIdx.x;

    // Loop over all pairs of sub-matrices of A and B needed for Csub.
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        Matrix Bsub = GetSubMatrix(B, m, blockCol);

        // Shared memory for the current tiles of A and B.
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

        // Each thread loads one element of each tile.
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);

        // Tiles must be fully loaded before anyone reads them.
        __syncthreads();

        for (int e = 0; e < BLOCK_SIZE; ++e) {
            Cvalue += As[row][e] * Bs[e][col];
        }

        // Finish reading before the next iteration overwrites the tiles.
        __syncthreads();
    }

    // Each thread writes its element of Csub.
    SetElement(Csub, row, col, Cvalue);
}

// Host driver for the naive (global-memory-only) kernel.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    Matrix d_A = {.width = A.width, .height = A.height};
    {
        size_t size = A.width * A.height * sizeof(float);
        cudaMalloc(&d_A.elements, size);
        cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    }
    Matrix d_B = {.width = B.width, .height = B.height};
    {
        size_t size = B.width * B.height * sizeof(float);
        cudaMalloc(&d_B.elements, size);
        cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    }
    Matrix d_C = {.width = C.width, .height = C.height};
    {
        size_t size = C.width * C.height * sizeof(float);
        cudaMalloc(&d_C.elements, size);
        dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
        dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
        MatMulKernel<<< dimGrid, dimBlock >>>(d_A, d_B, d_C);
        cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    }
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Naive kernel: one thread per output element, all operands from global
// memory.  Assumes dimensions are exact multiples of the block size (no
// bounds guard).
__global__ void MatMulKernel(const Matrix A, const Matrix B, Matrix C)
{
    float Cvalue = 0.0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    for (int e = 0; e < A.width; ++e) {
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    }
    C.elements[row * C.width + col] = Cvalue;
}

// Single-threaded CPU reference implementation.
void MatMul_CPU(const Matrix A, const Matrix B, Matrix C)
{
    {
        int i = 0, j = 0, k = 0;
        for (i = 0; i < C.height; i++) {
            for (j = 0; j < C.width; j++) {
                C.elements[i * C.width + j] = 0.0;
                for (k = 0; k < A.width; k++) {
                    C.elements[i * C.width + j] += A.elements[i * A.width + k] * B.elements[k * B.width + j];
                }
            }
        }
    }
    return;
}

// OpenMP-parallel CPU reference implementation.
void MatMul_multiCPU(const Matrix A, const Matrix B, Matrix C)
{
    {
        int i = 0, j = 0, k = 0;
        #pragma omp parallel for private(i, j, k)
        for (i = 0; i < C.height; i++) {
            for (j = 0; j < C.width; j++) {
                C.elements[i * C.width + j] = 0.0;
                for (k = 0; k < A.width; k++) {
                    C.elements[i * C.width + j] += A.elements[i * A.width + k] * B.elements[k * B.width + j];
                }
            }
        }
    }
    return;
}

// Compare two result matrices element-wise.
// Fix: the original indexed column `C.height` for every element instead of
// `j`, so it never inspected the actual elements (and read out of range).
void check_mat(const Matrix C, const Matrix CC)
{
    int flg = 0;
    {
        int i = 0, j = 0;
        for (i = 0; i < C.height; i++) {
            for (j = 0; j < C.width; j++) {
                // NOTE(review): 1e-10 is far below float accumulation error
                // for 1024-term dot products; loosen before re-enabling the
                // check_mat calls in main.
                if (fabs(C.elements[C.width * i + j] - CC.elements[C.width * i + j]) > 1e-10) {
                    flg = 1;
                }
            }
        }
    }
    printf(flg == 1 ? "Calculation error.\n" : "OK.\n");
    return;
}

int main(int argc, char *argv[])
{
    Matrix A  = {.width = K, .height = M, .stride = K, .elements = (float *) malloc((K * M) * sizeof(float))};
    Matrix B  = {.width = N, .height = K, .stride = N, .elements = (float *) malloc((N * K) * sizeof(float))};
    Matrix C  = {.width = N, .height = M, .stride = N, .elements = (float *) malloc((N * M) * sizeof(float))};
    Matrix C2 = {.width = N, .height = M, .stride = N, .elements = (float *) malloc((N * M) * sizeof(float))};
    Matrix C3 = {.width = N, .height = M, .stride = N, .elements = (float *) malloc((N * M) * sizeof(float))};

    // Fill A and B with reproducible pseudo-random values in [0, 1].
    {
        srand(248309);
        int i = 0;
        for (i = 0; i < (K * M); i++) {
            A.elements[i] = ((float) rand()) / ((float) RAND_MAX);
        }
        for (i = 0; i < (N * K); i++) {
            B.elements[i] = ((float) rand()) / ((float) RAND_MAX);
        }
    }

    // GPU shared-memory matmul.
    {
        struct timeval stime;
        gettimeofday(&stime, NULL);
        MatMul_Shared(A, B, C);
        struct timeval etime;
        gettimeofday(&etime, NULL);
        float nettime = (etime.tv_sec - stime.tv_sec) + (etime.tv_usec - stime.tv_usec) * 1.0e-6;
        printf("Elapsed time[s] for GPU shared-matmul: %f\n", nettime);
    }

    // GPU naive matmul.
    {
        struct timeval stime;
        gettimeofday(&stime, NULL);
        MatMul(A, B, C2);
        struct timeval etime;
        gettimeofday(&etime, NULL);
        float nettime = (etime.tv_sec - stime.tv_sec) + (etime.tv_usec - stime.tv_usec) * 1.0e-6;
        printf("Elapsed time[s] for GPU matmul: %f\n", nettime);
    }

    // CPU reference and cross-checks, disabled as in the original
    // (MatMul_CPU on 1024^3 is slow; see tolerance note on check_mat).
    // MatMul_CPU(A, B, C3);
    // check_mat(C, C2);
    // check_mat(C, C3);
    return 0;
}
22,079
#include <cuda.h>
#include <stdio.h>

// Element-wise product: result[i] = a[i] * b[i], one thread per element.
__global__ void arrayMult(float *a, float *b, float *result, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        result[idx] = a[idx] * b[idx];
    }
}

// Computes a dot product by multiplying element-wise on the GPU and
// summing the products on the host.
int main()
{
    const int N = 16;
    const size_t size = N * sizeof(float);

    // Host buffers.
    float *vector_a = (float *)malloc(size);
    float *vector_b = (float *)malloc(size);
    float *result   = (float *)malloc(size);

    // Device buffers.
    float *dev_a, *dev_b, *dev_result;
    cudaMalloc((void **)&dev_a, size);
    cudaMalloc((void **)&dev_b, size);
    cudaMalloc((void **)&dev_result, size);

    // Constant test vectors: 2 * 4 summed 16 times -> 128.
    for (int i = 0; i < N; i++) {
        vector_a[i] = 2.f;
        vector_b[i] = 4.f;
    }

    cudaMemcpy(dev_a, vector_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, vector_b, size, cudaMemcpyHostToDevice);

    // 4 blocks of 4 threads cover all 16 elements exactly.
    const int blockSize = 4;
    const int nBlocks = 4;
    arrayMult <<<nBlocks, blockSize>>> (dev_a, dev_b, dev_result, N);

    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(result, dev_result, size, cudaMemcpyDeviceToHost);

    // Host-side reduction of the element-wise products.
    float dotProduct = 0;
    for (int i = 0; i < N; i++) {
        dotProduct += result[i];
    }

    printf("Vector_A: ");
    for (int i = 0; i < N; i++) {
        printf("%f,", vector_a[i]);
    }
    printf("\nVector_B: ");
    for (int i = 0; i < N; i++) {
        printf("%f,", vector_b[i]);
    }
    printf("\nResult = %f\n", dotProduct);

    free(vector_a);
    free(vector_b);
    free(result);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_result);
}
22,080
#include <stdio.h>

// Per-gridpoint characteristic length h for a spectral-element mesh.
// Coordinates are laid out [element][iz][iy][ix]; one thread per gridpoint,
// nnel = nelt * lx1 * ly1 * lz1.
__global__ void compute_grid_h_kernel( double *gridh, double *xm1, double *ym1, double *zm1, int nelt, int lx1, int ly1, int lz1, int if3d, int nnel, int lxy, int lxyz ){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id<nnel){
        int ix = id % lx1;
        int iy = (id/lx1)%ly1;
        int iz = (id/(lxy))%lz1;
        int e  = id / (lxyz);

        // Clamp neighbour indices to the element.  Indices are 0-based, so
        // the valid range is [0, l?1-1].  The original kept the Fortran
        // 1-based tests (< 1, > l?1), which both mis-clamped at index 0 and
        // let ip1 == lx1 (etc.) read one entry past the end of a line.
        int izm, izp;
        if (if3d) {
            int km1 = iz-1;
            int kp1 = iz+1;
            izm = (km1 < 0)    ? iz : km1;
            izp = (kp1 >= lz1) ? iz : kp1;
        } else {
            izm = iz;
            izp = iz;
        }
        int jm1 = iy-1;
        int jp1 = iy+1;
        int iym = (jm1 < 0)    ? iy : jm1;
        int iyp = (jp1 >= ly1) ? iy : jp1;
        int im1 = ix-1;
        int ip1 = ix+1;
        int ixm = (im1 < 0)    ? ix : im1;
        int ixp = (ip1 >= lx1) ? ix : ip1;

        // Neighbouring coordinates.  These must be double: the original
        // declared them int, silently truncating every coordinate.
        double x1 = xm1[e*lxyz+iz*lxy+iy*lx1+ixm];
        double x2 = xm1[e*lxyz+iz*lxy+iy*lx1+ixp];
        double x3 = xm1[e*lxyz+iz*lxy+iym*lx1+ix];
        double x4 = xm1[e*lxyz+iz*lxy+iyp*lx1+ix];
        double x5 = xm1[e*lxyz+izm*lxy+iy*lx1+ix];
        double x6 = xm1[e*lxyz+izp*lxy+iy*lx1+ix];
        double y1 = ym1[e*lxyz+iz*lxy+iy*lx1+ixm];
        double y2 = ym1[e*lxyz+iz*lxy+iy*lx1+ixp];
        double y3 = ym1[e*lxyz+iz*lxy+iym*lx1+ix];
        double y4 = ym1[e*lxyz+iz*lxy+iyp*lx1+ix];
        double y5 = ym1[e*lxyz+izm*lxy+iy*lx1+ix];
        double y6 = ym1[e*lxyz+izp*lxy+iy*lx1+ix];
        double z1 = zm1[e*lxyz+iz*lxy+iy*lx1+ixm];
        double z2 = zm1[e*lxyz+iz*lxy+iy*lx1+ixp];
        double z3 = zm1[e*lxyz+iz*lxy+iym*lx1+ix];
        double z4 = zm1[e*lxyz+iz*lxy+iyp*lx1+ix];
        double z5 = zm1[e*lxyz+izm*lxy+iy*lx1+ix];
        double z6 = zm1[e*lxyz+izp*lxy+iy*lx1+ix];

        // Local coordinate differences in the three index directions.
        double a1=x2-x1, a2=y2-y1, a3=z2-z1;
        double b1=x4-x3, b2=y4-y3, b3=z4-z3;
        double c1=x6-x5, c2=y6-y5, c3=z6-z5;

        double fact;
        if (if3d) {
            // h doesn't reach into corners of neighboring elements:
            // one-sided differences at element faces double the weight.
            fact=0.125;
            if (ixp==ix || ixm==ix) { fact=2.0*fact; }
            if (iym==iy || iyp==iy) { fact=2.0*fact; }
            if (izm==iz || izp==iz) { fact=2.0*fact; }
            // Cross product d = a x b ...
            double d1 = a2*b3 - a3*b2;
            double d2 = a3*b1 - a1*b3;
            double d3 = a1*b2 - a2*b1;
            // ... dotted with c gives the local volume; h = volume^(1/3).
            double sum = c1*d1+c2*d2+c3*d3;
            gridh[e*lxyz+iz*lxy+iy*lx1+ix] = pow(fabs(fact*sum), 1.0/3.0);
        } else {
            fact=0.25;
            if (ixp==ix || ixm==ix) fact=2.0*fact;
            if (iym==iy || iyp==iy) fact=2.0*fact;
            // 2D: h = sqrt(local area).  sqrt (not sqrtf): operands are double.
            gridh[e*lxyz+iz*lxy+iy*lx1+ix] = sqrt(fact*fabs(a1*b2-a2*b1));
        }
    }
}

// Fortran-callable wrapper: launches compute_grid_h_kernel over all
// gridpoints of all elements.
extern "C" void compute_grid_h_gpu_wrapper_(int *glbblockSize1,double *d_gridh, double *d_xm1, double *d_ym1, double *d_zm1, int *nelt, int *lx1, int *ly1, int *lz1, int *if3d){
    cudaDeviceSynchronize();
    cudaError_t code1 = cudaPeekAtLastError();
    printf("CUDA: Start compute_grid_h_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
    printf("CUDA: Start compute_grid_h_gpu_wrapper values glbblockSize1=%d, nelt=%d,lx1=%d,ly1=%d,lz1=%d,if3d=%d\n",glbblockSize1[0],nelt[0],lx1[0],ly1[0],lz1[0],if3d[0]);
    int blockSize = glbblockSize1[0], gridSize;
    int lxy=lx1[0]*ly1[0];
    int lxyz=lxy*lz1[0];
    int nnel=nelt[0]*lxyz;
    gridSize = (int)ceil((float)nnel/blockSize);
    printf("CUDA: compute_grid_h_gpu_wrapper grid size = %d, block size = %d \n",gridSize,blockSize);
    compute_grid_h_kernel<<<gridSize, blockSize>>>(d_gridh, d_xm1, d_ym1, d_zm1, nelt[0],lx1[0],ly1[0],lz1[0],if3d[0],nnel,lxy,lxyz);
    cudaDeviceSynchronize();
    cudaError_t code2 = cudaPeekAtLastError();
    printf("CUDA: End compute_grid_h_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
}

// Per-element mesh size: rp times the shortest distance between any two
// element corners.  One thread per element.
__global__ void compute_mesh_h_kernel( double *meshh, double *xm1, double *ym1, double *zm1, int nelt, int lx1, int ly1, int lz1, int if3d, double rp , int ncrn, int lxy, int lxyz){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id<nelt){
        int e = id;
        double xcrn[8], ycrn[8];
        // Zero-initialised: in the 2D path the original read zcrn without
        // ever writing it.
        double zcrn[8] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

        // Corner coordinates of element e (first the z=0 face...).
        xcrn[0] = xm1[e*lxyz];
        xcrn[1] = xm1[e*lxyz+lx1-1];
        xcrn[2] = xm1[e*lxyz+(ly1-1)*lx1];
        xcrn[3] = xm1[e*lxyz+(ly1-1)*lx1+lx1-1];
        ycrn[0] = ym1[e*lxyz];
        ycrn[1] = ym1[e*lxyz+lx1-1];
        ycrn[2] = ym1[e*lxyz+(ly1-1)*lx1];
        ycrn[3] = ym1[e*lxyz+(ly1-1)*lx1+lx1-1];
        if (if3d) {
            // ...then the z=lz1-1 face, plus all z coordinates.
            xcrn[4] = xm1[e*lxyz+(lz1-1)*lxy];
            xcrn[5] = xm1[e*lxyz+(lz1-1)*lxy+lx1-1];
            xcrn[6] = xm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
            xcrn[7] = xm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+lx1-1];
            ycrn[4] = ym1[e*lxyz+(lz1-1)*lxy];
            ycrn[5] = ym1[e*lxyz+(lz1-1)*lxy+lx1-1];
            ycrn[6] = ym1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
            ycrn[7] = ym1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+lx1-1];
            zcrn[0] = zm1[e*lxyz];
            zcrn[1] = zm1[e*lxyz+lx1-1];
            zcrn[2] = zm1[e*lxyz+(ly1-1)*lx1];
            zcrn[3] = zm1[e*lxyz+(ly1-1)*lx1+lx1-1];
            zcrn[4] = zm1[e*lxyz+(lz1-1)*lxy];
            zcrn[5] = zm1[e*lxyz+(lz1-1)*lxy+lx1-1];
            zcrn[6] = zm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
            zcrn[7] = zm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+lx1-1];
        }

        // Minimum pairwise corner distance.
        double dist = 10e+36;
        for (int ic1 = 0; ic1 < ncrn; ic1++){
            for (int ic2 = 0; ic2 < ncrn; ic2++){
                if (ic2 != ic1){
                    double txcrn = xcrn[ic2]-xcrn[ic1];
                    double tycrn = ycrn[ic2]-ycrn[ic1];
                    double tzcrn = zcrn[ic2]-zcrn[ic1];
                    double dtmp = txcrn*txcrn+tycrn*tycrn+tzcrn*tzcrn;
                    // sqrt (not sqrtf): dtmp is double; sqrtf threw away
                    // half the precision.
                    double sqrtdtmp = sqrt(dtmp);
                    if (sqrtdtmp < dist){
                        dist = sqrtdtmp;
                    }
                }
            }
        }
        meshh[e] = dist*rp;
    }
}

// Fortran-callable wrapper: launches compute_mesh_h_kernel, one thread per
// element.
extern "C" void compute_mesh_h_gpu_wrapper_(int *glbblockSize2,double *d_meshh, double *d_xm1, double *d_ym1, double *d_zm1, int *nelt, int *lx1, int *ly1, int *lz1, int *if3d, double *rp, int *ncrn){
    cudaDeviceSynchronize();
    cudaError_t code1 = cudaPeekAtLastError();
    printf("CUDA: Start compute_mesh_h_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
    printf("CUDA: Start compute_mesh_h_gpu_wrapper values nelt=%d,lx1=%d,ly1=%d,lz1=%d,if3d=%d rp=%lf,ncrn=%d \n",nelt[0],lx1[0],ly1[0],lz1[0],if3d[0],rp[0],ncrn[0]);
    int blockSize = glbblockSize2[0], gridSize;
    gridSize = (int)ceil((float)nelt[0]/blockSize);
    int lxy=lx1[0]*ly1[0];
    int lxyz=lz1[0]*lxy;
    compute_mesh_h_kernel<<<gridSize, blockSize>>>(d_meshh, d_xm1, d_ym1, d_zm1, nelt[0],lx1[0],ly1[0],lz1[0],if3d[0],rp[0],ncrn[0],lxy,lxyz);
    cudaDeviceSynchronize();
    cudaError_t code2 = cudaPeekAtLastError();
    printf("CUDA: End compute_mesh_h_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
}
22,081
/*
 * hello.cu:
 * Minimal CUDA program: launch an empty kernel, then greet from the host.
 */
#include <stdio.h>

// No-op kernel; exists only to exercise a device launch.
__global__ void mykernel() { }

int main()
{
    mykernel<<<1, 1>>>();
    printf("Hello, CUDA World!\n");
    return 0;
}
22,082
#include "includes.h"

// Out-of-place matrix transpose (nx columns by ny rows, row-major) staged
// through dynamically sized shared memory.  Expected launch: 2D blocks
// tiling the input with blockDim.x * blockDim.y * sizeof(float) bytes of
// dynamic shared memory.
__global__ void transposeSmemDyn(float *out, float *in, int nx, int ny)
{
    // dynamic shared memory (one float per thread of the block)
    extern __shared__ float tile[];

    // coordinate in original matrix
    unsigned int ix, iy, ti, to;
    ix = blockDim.x * blockIdx.x + threadIdx.x;
    iy = blockDim.y * blockIdx.y + threadIdx.y;

    // linear global memory index for original matrix
    ti = iy * nx + ix;

    // thread index in transposed block: row_idx enumerates the tile
    // row-major; (irow, icol) re-address the same tile column-major so the
    // shared-memory write is row-wise and the read is transposed.
    unsigned int row_idx, col_idx, irow, icol;
    row_idx = threadIdx.y * blockDim.x + threadIdx.x;
    irow = row_idx / blockDim.y;
    icol = row_idx % blockDim.y;
    col_idx = icol * blockDim.x + irow;

    // coordinate in transposed matrix (ix/iy are reused for the new roles)
    ix = blockDim.y * blockIdx.y + icol;
    iy = blockDim.x * blockIdx.x + irow;

    // linear global memory index for transposed matrix
    to = iy * ny + ix;

    // transpose with boundary test
    // NOTE(review): ix/iy are the *transposed* coordinates here yet are
    // still compared against nx/ny; for non-square matrices whose sizes are
    // not multiples of the block this guard looks wrong -- confirm the
    // intended launches only use exact multiples.
    if (ix < nx && iy < ny)
    {
        // load data from global memory to shared memory
        tile[row_idx] = in[ti];

        // thread synchronization
        // NOTE(review): this __syncthreads() sits inside a divergent branch;
        // it is safe only when the whole block passes the guard uniformly.
        __syncthreads();

        // store data to global memory from shared memory
        out[to] = tile[col_idx];
    }
}
22,083
#include <stdio.h>
#include <stdlib.h>

/* Abort with a location and message if a CUDA runtime call fails.  The
 * original code ignored every return status, so device errors could only
 * surface as silently wrong output. */
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                 \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

/* The old-fashioned CPU-only way to add two vectors */
void add_vectors_host(int *result, int *a, int *b, int n) {
    for (int i = 0; i < n; i++)
        result[i] = a[i] + b[i];
}

/* The kernel that will execute on the GPU */
__global__ void add_vectors_kernel(int *result, int *a, int *b, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // If we have more threads than the magnitude of our vector, we need to
    // make sure that the excess threads don't try to save results into
    // unallocated memory.
    if (idx < n)
        result[idx] = a[idx] + b[idx];
}

/* This function encapsulates the process of creating and tearing down the
 * environment used to execute our vector addition kernel. The steps are:
 *  1. Allocate memory on the device to hold our vectors
 *  2. Copy the vectors to device memory
 *  3. Execute the kernel
 *  4. Retrieve the result vector from the device
 *  5. Free memory on the device
 */
void add_vectors_dev(int *result, int *a, int *b, int n) {
    // Step 1: Allocate memory.  cudaMalloc takes the address of our device
    // pointer so it can write the device address into it.
    int *a_dev, *b_dev, *result_dev;
    CUDA_CHECK(cudaMalloc((void **) &a_dev, sizeof(int) * n));
    CUDA_CHECK(cudaMalloc((void **) &b_dev, sizeof(int) * n));
    CUDA_CHECK(cudaMalloc((void **) &result_dev, sizeof(int) * n));

    // Step 2: Copy the input vectors to the device
    CUDA_CHECK(cudaMemcpy(a_dev, a, sizeof(int) * n, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(b_dev, b, sizeof(int) * n, cudaMemcpyHostToDevice));

    // Step 3: Invoke the kernel.  Enough 512-thread blocks to cover all `n`
    // elements; 512 is somewhat arbitrary but supported by the hardware.
    dim3 dimGrid((n + 512 - 1) / 512, 1, 1);
    dim3 dimBlock(512, 1, 1);
    add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n);
    CUDA_CHECK(cudaGetLastError());  /* catches bad launch configuration */

    // Step 4: Retrieve the results (blocking copy also synchronizes)
    CUDA_CHECK(cudaMemcpy(result, result_dev, sizeof(int) * n, cudaMemcpyDeviceToHost));

    // Step 5: Free device memory
    CUDA_CHECK(cudaFree(a_dev));
    CUDA_CHECK(cudaFree(b_dev));
    CUDA_CHECK(cudaFree(result_dev));
}

// Print `n` integers separated by spaces, followed by a newline.
void print_vector(int *array, int n) {
    int i;
    for (i = 0; i < n; i++)
        printf("%d ", array[i]);
    printf("\n");
}

int main(void) {
    int n = 5; // Length of the arrays
    int a[] = {0, 1, 2, 3, 4};
    int b[] = {5, 6, 7, 8, 9};
    int host_result[5];
    int device_result[5];

    // Enumerate the visible devices and their compute capability.
    int deviceCount;
    CUDA_CHECK(cudaGetDeviceCount(&deviceCount));
    int device;
    for (device = 0; device < deviceCount; ++device) {
        cudaDeviceProp deviceProp;
        CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, device));
        printf("Device %d has compute capability %d.%d.\n",
               device, deviceProp.major, deviceProp.minor);
    }

    printf("The CPU's answer: ");
    add_vectors_host(host_result, a, b, n);
    print_vector(host_result, n);

    printf("The GPU's answer: ");
    add_vectors_dev(device_result, a, b, n);
    print_vector(device_result, n);

    return 0;
}
22,084
#include <math.h>
#include <stdlib.h>
#include <stdio.h>

// Element-wise product kernel: c[i] = a[i] * b[i] for i < n.
__global__ void mul_array(int n, float *a, float *b, float *c)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i<n) c[i] = a[i]*b[i];
}

// Multiplies a Gaussian by a sine on both CPU and GPU, then prints both
// results and their difference as a correctness check.
int main()
{
    int i, n=1000;
    float x;
    int bytes=n*sizeof(float);
    float *a = (float*)malloc(bytes);
    float *b = (float*)malloc(bytes);
    float *c = (float*)malloc(bytes);
    // Sample a Gaussian (a) and a sine (b) on x in [-5, 5).
    for(i=0; i<n; i++)
    {
        x = -5+0.01*i;
        a[i] = exp(-x*x/2);
        b[i] = sin(5*x);
    }
    // CPU reference result.
    for(i=0; i<n; i++) c[i] = a[i]*b[i];
    float *c2 = (float*)malloc(bytes);
    float *a_gpu, *b_gpu, *c_gpu;
    cudaMalloc((void**)&a_gpu, bytes);
    cudaMalloc((void**)&b_gpu, bytes);
    cudaMalloc((void**)&c_gpu, bytes);
    cudaMemcpy(a_gpu, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b, bytes, cudaMemcpyHostToDevice);
    dim3 Db = dim3(256,1,1); // threads per block
    dim3 Dg = dim3(n/256+1,1); // blocks in the grid (enough to cover n)
    mul_array<<<Dg,Db>>>(n, a_gpu, b_gpu, c_gpu);
    // The blocking cudaMemcpy synchronizes with the kernel before we read c2.
    cudaMemcpy(c2, c_gpu, bytes, cudaMemcpyDeviceToHost);
    for(i=0; i<n; i++) printf("%g, %g, %g\n",c[i], c2[i], c[i]-c2[i] );
    free(a); free(b); free(c);
    // NOTE(review): c2 is never freed -- a free(c2) is missing here.
    cudaFree(a_gpu); cudaFree(b_gpu); cudaFree(c_gpu);
}
22,085
/************************************************************************************\ * * * Copyright � 2014 Advanced Micro Devices, Inc. * * Copyright (c) 2015 Mark D. Hill and David A. Wood * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following are met: * * * * You must reproduce the above copyright notice. * * * * Neither the name of the copyright holder nor the names of its contributors * * may be used to endorse or promote products derived from this software * * without specific, prior, written permission from at least the copyright holder. * * * * You must include the following terms in your license and/or other materials * * provided with the software. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * * IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A * * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER * * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * * OF SUCH DAMAGE. * * * * Without limiting the foregoing, the software may implement third party * * technologies for which you must obtain licenses from parties other than AMD. 
* * You agree that AMD has not obtained or conveyed to you, and that you shall * * be responsible for obtaining the rights to use and/or distribute the applicable * * underlying intellectual property rights related to the third party technologies. * * These third party technologies are not licensed hereunder. * * * * If you use the software (in whole or in part), you shall adhere to all * * applicable U.S., European, and other export laws, including but not limited to * * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), * * and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant * * to Section 740.6 of the EAR, you hereby certify that, except pursuant to a * * license granted by the United States Department of Commerce Bureau of Industry * * and Security or as otherwise permitted pursuant to a License Exception under * * the U.S. Export Administration Regulations ("EAR"), you will not (1) export, * * re-export or release to a national of a country in Country Groups D:1, E:1 or * * E:2 any restricted technology, software, or source code you receive hereunder, * * or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such * * technology or software, if such foreign produced direct product is subject to * * national security controls as identified on the Commerce Control List (currently * * found in Supplement 1 to Part 774 of EAR). For the most current Country Group * * listings, or for additional information about the EAR or your obligations under * * those regulations, please refer to the U.S. Bureau of Industry and Security's * * website at http://www.bis.doc.gov/. 
* * * \************************************************************************************/ /** * @brief Breadth-first traversal * @param row CSR pointer array * @param col CSR column array * @param d Distance array * @param rho Rho array * @param p Dependency array * @param cont Termination variable * @param num_nodes Termination variable * @param num_edges Termination variable * @param dist Current traversal layer */ __global__ void bfs_kernel(int *row, int *col, int *d, float *rho, int *cont, const int num_nodes, const int num_edges, const int dist) { int tid = blockIdx.x * blockDim.x + threadIdx.x; //navigate the current layer if (tid < num_nodes && d[tid] == dist) { //get the starting and ending pointers //of the neighbor list int start = row[tid]; int end; if (tid + 1 < num_nodes) end = row[tid + 1]; else end = num_edges; //navigate through the neighbor list for (int edge = start; edge < end; edge++) { int w = col[edge]; if (d[w] < 0) { *cont = 1; //traverse another layer d[w] = dist + 1; } //transfer the rho value to the neighbor if (d[w] == (dist + 1)) { atomicAdd(&rho[w], rho[tid]); } } } } /** * @brief Back traversal * @param row CSR pointer array * @param col CSR column array * @param d Distance array * @param rho Rho array * @param sigma Sigma array * @param p Dependency array * @param cont Termination variable * @param num_nodes Termination variable * @param num_edges Termination variable * @param dist Current traversal layer * @param s Source vertex * @param bc Betweeness Centrality array */ __global__ void backtrack_kernel(int *row, int *col, int *d, float *rho, float *sigma, const int num_nodes, const int num_edges, const int dist, const int s, float* bc) { int tid = blockIdx.x * blockDim.x + threadIdx.x; // Navigate the current layer if (tid < num_nodes && d[tid] == dist - 1) { int start = row[tid]; int end; if (tid + 1 < num_nodes) end = row[tid + 1]; else end = num_edges; // Get the starting and ending pointers // of the neighbor list in the 
reverse graph for (int edge = start; edge < end; edge++) { int w = col[edge]; // Update the sigma value traversing back if (d[w] == dist - 2) atomicAdd(&sigma[w], rho[w] / rho[tid] * (1 + sigma[tid])); } // Update the BC value if (tid != s) bc[tid] = bc[tid] + sigma[tid]; } } /** * @brief back_sum_kernel (not used) * @param s Source vertex * @param dist Current traversal layer * @param d Distance array * @param sigma Sigma array * @param bc Betweeness Centrality array * @param num_nodes Termination variable * @param num_edges Termination variable */ __global__ void back_sum_kernel(const int s, const int dist, int *d, float *sigma, float *bc, const int num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) { // If it is not the source if (s != tid && d[tid] == dist - 1) { bc[tid] = bc[tid] + sigma[tid]; } } } /** * @brief array set 1D * @param s Source vertex * @param dist_array Distance array * @param sigma Sigma array * @param rho Rho array * @param num_nodes Termination variable */ __global__ void clean_1d_array(const int source, int *dist_array, float *sigma, float *rho, const int num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) { sigma[tid] = 0; if (tid == source) { // If source vertex rho = 1, dist = 0 rho[tid] = 1; dist_array[tid] = 0; } else { // If other vertices rho = 0, dist = -1 rho[tid] = 0; dist_array[tid] = -1; } } } /** * @brief array set 2D * @param p Dependency array * @param num_nodes Number of vertices */ __global__ void clean_2d_array(int *p, const int num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes * num_nodes) p[tid] = 0; } /** * @brief clean BC * @param bc_d Betweeness Centrality array * @param num_nodes Number of vertices */ __global__ void clean_bc(float *bc_d, const int num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) bc_d[tid] = 0; }
22,086
#include<stdio.h>
#include<iostream>
using namespace std;

/*
 * Kernel: element-wise vector addition, Out[i] = input1[i] + input2[i].
 * Threads with i >= size do nothing, so any grid covering `size`
 * elements is safe.
 */
__global__ void sum(int* input1, int* input2, int *Out, int size)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    printf("\nThread id%d", threadIdx.x);   // debug trace kept from the original
    if (i < size)
        Out[i] = input1[i] + input2[i];
}

int main()
{
    int i, n;
    cout << "Enter no. of elements";
    cin >> n;                               // read number of elements in vector
    if (n <= 0)
        return 0;                           // nothing to do for empty input

    // Fixed: the original used variable-length stack arrays (int a[n]),
    // which are non-standard C++ and overflow the stack for large n.
    int *a = new int[n];
    int *b = new int[n];
    int *c = new int[n];

    cout << "Enter elements of first vector";
    for (int i = 0; i < n; i++)
        cin >> a[i];                        // read first vector
    cout << "Enter elements of second vector";
    for (int i = 0; i < n; i++)
        cin >> b[i];                        // read second vector

    int *a1, *b1, *c1;
    cudaMalloc((void **) &a1, n*sizeof(int));   // allocating device memory
    cudaMalloc((void **) &b1, n*sizeof(int));
    cudaMalloc((void **) &c1, n*sizeof(int));

    for (i = 0; i < n; i++) {
        c[i] = 0;
    }

    // data transfer from host memory to device memory
    cudaMemcpy(a1, a, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b1, b, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(c1, c, n*sizeof(int), cudaMemcpyHostToDevice);

    // Fixed: the original <<<1, n>>> launch silently fails for n > 1024
    // (max threads per block); use a round-up grid of 256-thread blocks.
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    sum<<<blocks, threads>>>(a1, b1, c1, n);    // calling kernel
    cudaDeviceSynchronize();

    // data transfer from device memory to host memory
    cudaMemcpy(c, c1, n*sizeof(int), cudaMemcpyDeviceToHost);

    for (i = 0; i < n; i++) {
        cout << "\n " << a[i] << " + " << b[i] << " = " << c[i];
    }

    cudaFree(a1);                           // freeing device memory
    cudaFree(b1);
    cudaFree(c1);
    delete[] a;                             // freeing host memory (was leaked)
    delete[] b;
    delete[] c;
    return 0;
}
22,087
#include <iostream>
#include <cuda.h>
#include <cstdlib>

/*
 * Base class whose allocation operators place instances in CUDA managed
 * (unified) memory, so both host and device code can access them.
 */
class Unified {
public:
    void *operator new(size_t len) {
        void *ptr;
        cudaMallocManaged(&ptr, len);
        return ptr;
    }
    void operator delete(void *ptr) {
        cudaFree(ptr);
    }
    void* operator new[] (std::size_t size) {
        void *ptr;
        cudaMallocManaged(&ptr, size);
        return ptr;
    }
    void operator delete[] (void* ptr) {
        cudaFree(ptr);
    }
};

class publisher : public Unified {
public:
    float value;
    __device__ void setValue(float v) { value = v; }
};

/*
 * GPU kernel: each thread stores `num` into one publisher slot.
 * Fixed: takes the array length and bounds-checks it — the original's
 * second launch used 2 threads on a 1-element array, writing out of
 * bounds of the managed allocation.
 */
__global__ void publish_msg(publisher *topic, float num, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n)
        topic[i].setValue(num);
}

class subscriber : public Unified {
public:
    float value;
    // NOTE(review): despite its name this STORES v into the subscriber;
    // it is unused in this program.
    __device__ void getValue(float v) { value = v; }
};

/* Host side: print one topic's value. */
__host__ void sub_msg(publisher *topic, int i)
{
    std::cout << "Topic[" << i << "] = " << topic[i].value << "\n";
}

int main(int argc, char *argv[])
{
    int n = 1;
    int i = 0;

    publisher *topic = new publisher[n];
    publish_msg<<<1, 1>>>(topic, 6.9, n);        /* GPU */
    // Managed memory must not be touched by the host while the kernel runs.
    cudaDeviceSynchronize();
    sub_msg(topic, i);
    i++;

    publisher *topic1 = new publisher[n];
    // 2 threads launched, but the kernel's bound check keeps writes
    // inside the n-element array.
    publish_msg<<<1, 2>>>(topic1, 7.7, n);
    cudaDeviceSynchronize();
    //sub_msg(topic1,i);

    // Fixed: release the managed allocations (routes through Unified's
    // operator delete[], i.e. cudaFree); the original leaked both.
    delete[] topic;
    delete[] topic1;
    return 0;
}
22,088
/*
  This program demonstrates the basics of working with cuda. We use
  the GPU to add two arrays. We also introduce cuda's approach to
  error handling and timing using cuda Events.

  This is the main program. You should also look at the header add.h
  for the important declarations, and then look at add.cu to see how
  to define functions that execute on the GPU.
*/
#include <iostream>
#include <math.h>

// CUDA kernel to add elements of two arrays. Uses a grid-stride loop
// (i advances by the total thread count), so any launch configuration
// covers all n elements.
__global__ void add(int n, float *x, float *y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

int main(void)
{
    int N = 1<<20;
    float *x, *y;

    // Allocate Unified Memory -- accessible from CPU or GPU
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));

    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Launch kernel on 1M elements on the GPU
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;

    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    cudaEventRecord( start, 0 );
    add<<<numBlocks, blockSize>>>(N, x, y);
    // Record the stop event in the same stream and wait on it. This
    // times only the kernel; the original synchronized the whole device
    // *before* recording `end`, inflating the measurement.
    cudaEventRecord( end, 0 );
    cudaEventSynchronize( end );

    float elapsedTime;
    cudaEventElapsedTime( &elapsedTime, start, end );

    // The event sync above also guarantees the kernel has finished, so
    // the host may now safely read the managed memory.
    cudaDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Fixed: only claim success when verification actually passed; the
    // original printed the success message unconditionally.
    if (maxError == 0.0f)
        std::cout << "Yay! Your program's results are correct." << std::endl;
    else
        std::cout << "Verification FAILED (max error above)." << std::endl;
    std::cout << "Your program took: " << elapsedTime << " ms." << std::endl;

    // Cleanup in the event of success.
    cudaEventDestroy( start );
    cudaEventDestroy( end );

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
22,089
/* author: fredy m, uaem — desonses@gmail.com for more comments */
#ifdef __CUDACC__
#define cuda_SYNCTHREADS() __syncthreads();
#else
#define cuda_SYNCTHREADS()
#endif

#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>

#define N 32

/*
 * Integrand for the pi approximation: f(x) = 4 / (1 + x^2).
 * Its integral over [0, 1] equals pi, approximated below with the
 * trapezoid rule over N sample points.
 */
__host__ float func(float valor)
{
    return 4 / (1 + powf(valor, 2));
}

/*
 * Trapezoid-rule integration over the N sample points, followed by a
 * shared-memory tree reduction. Requires a single block of N threads
 * and N floats of dynamic shared memory.
 *
 * Fixed: thread 0 previously read particion[-1] and funcion[-1]
 * (out-of-bounds); it now contributes 0, since the first sample has
 * no left neighbour and hence no trapezoid.
 */
__global__ void calcula(float *particion, float *funcion, float *sum)
{
    // dynamic shared memory, sized at launch time
    extern __shared__ float temporal[];
    int id = threadIdx.x;

    if (id == 0) {
        temporal[0] = 0.0f;
    } else {
        float xi  = particion[id];
        float xim = particion[id - 1];
        float yi  = funcion[id];
        float yim = funcion[id - 1];
        // area of the trapezoid between samples id-1 and id
        temporal[id] = .5f * ((xi - xim) * (yi + yim));
    }
    cuda_SYNCTHREADS();

    // Parallel tree reduction: log2(N) passes, half the threads active
    // in each pass (assumes N is a power of two).
    int salto = N / 2;
    while (salto) {
        if (id < salto) {
            temporal[id] = temporal[id] + temporal[id + salto];
        }
        cuda_SYNCTHREADS();
        salto = salto / 2;
    }

    // Thread 0 publishes the final sum to global memory.
    if (id == 0) {
        *sum = temporal[0];
    }
}

int main(int argc, char** argv)
{
    float *vector1, *vector2, *resultado;
    float *dev_vector1, *dev_vector2, *dev_resultado;
    size_t size = N * sizeof(float);

    // host allocations
    vector1 = (float*)malloc(size);
    vector2 = (float*)malloc(size);
    resultado = (float*)malloc(size);

    // device allocations
    cudaMalloc((void**)&dev_vector1, size);
    cudaMalloc((void**)&dev_vector2, size);
    cudaMalloc((void**)&dev_resultado, size);

    // sample points x_i evenly spaced in [0, 1] and f(x_i)
    for (int i = 0; i < N; i++) {
        vector1[i] = (float)i / (N - 1);
        vector2[i] = func(vector1[i]);
    }

    // send the data to the device
    cudaMemcpy(dev_vector1, vector1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_vector2, vector2, size, cudaMemcpyHostToDevice);

    // kernel launch: one block, N threads, N floats of shared memory
    calcula <<<1, N, size>>>(dev_vector1, dev_vector2, dev_resultado);

    // collect the result
    cudaMemcpy(resultado, dev_resultado, size, cudaMemcpyDeviceToHost);

    printf("pi = %.5f, \n", resultado[0]);

    // Fixed: the original leaked every allocation.
    free(vector1);
    free(vector2);
    free(resultado);
    cudaFree(dev_vector1);
    cudaFree(dev_vector2);
    cudaFree(dev_resultado);
    return 0;
}
22,090
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>

//Copied from NVidia
// Block-wide tree reduction: sums data[0..blockDim.x) into out[0].
// Assumes blockDim.x is a power of two and `data` lives in shared memory.
// NOTE(review): no barrier before the first pass — callers must have
// synchronized after filling `data`.
__device__ void sum_reduction(int *data, int *out) {
  unsigned int id = threadIdx.x;
  for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
    if (id < s) {
      data[id] += data[id + s];
    }
    __syncthreads();
  }
  if (id == 0) out[0] = data[0];
  __syncthreads();
}

//Find maximum of given data, output to out
// `temp` is shared scratch; `data` is left untouched. Power-of-two
// blockDim.x assumed, same as sum_reduction.
__device__ void max_func(short *data, short *temp, short *out) {
  unsigned int tid = threadIdx.x;
  temp[tid] = data[tid];
  __syncthreads();
  for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
    if (tid < s) {
      temp[tid] = ((temp[tid+s] > temp[tid]) ? temp[tid+s] : temp[tid]);
    }
    __syncthreads();
  }
  if (tid == 0) {
    out[0] = temp[0];
  }
}

//Find minimum of given data, output to out
// Identical structure to max_func with the comparison inverted.
__device__ void min_func(short *data, short *temp, short *out) {
  unsigned int tid = threadIdx.x;
  temp[tid] = data[tid];
  __syncthreads();
  for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
    if (tid < s) {
      temp[tid] = ((temp[tid+s] < temp[tid]) ? temp[tid+s] : temp[tid]);
    }
    __syncthreads();
  }
  if (tid == 0) {
    out[0] = temp[0];
  }
}

//Block scan based on NVidia implementation found here:
// http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html
// In-place EXCLUSIVE prefix sum over data[0..blockDim.x) in shared
// memory (Blelloch up-sweep then down-sweep). Requires power-of-two
// blockDim.x.
__device__ void block_scan(int *data) {
  unsigned int tid = threadIdx.x;
  // up-sweep: build partial sums at power-of-two strides
  for (unsigned int d = 1; d<blockDim.x; d<<=1) {
    if ((tid + 1) % (d<<1) == 0) {
      data[tid] = data[tid] + data[tid - d];
    }
    __syncthreads();
  }
  // clear the root before the down-sweep (makes the scan exclusive)
  if (tid==blockDim.x-1) {
    data[tid] = 0;
  }
  __syncthreads();
  int tmp;
  // down-sweep: propagate prefixes back down the tree
  for (unsigned int d = blockDim.x>>1; d >= 1; d>>=1) {
    if ((tid + 1) % (d<<1) == 0) {
      tmp = data[tid - d];
      data[tid - d] = data[tid];
      data[tid] = tmp + data[tid];
    }
    __syncthreads();
  }
}

//Same as previous but for floats. Should be templated
__device__ void block_scan(float *data) {
  unsigned int tid = threadIdx.x;
  for (unsigned int d = 1; d<blockDim.x; d<<=1) {
    if ((tid + 1) % (d<<1) == 0) {
      data[tid] = data[tid] + data[tid - d];
    }
    __syncthreads();
  }
  if (tid==blockDim.x-1) {
    data[tid] = 0;
  }
  __syncthreads();
  float tmp;
  for (unsigned int d = blockDim.x>>1; d >= 1; d>>=1) {
    if ((tid + 1) % (d<<1) == 0) {
      tmp = data[tid - d];
      data[tid - d] = data[tid];
      data[tid] = tmp + data[tid];
    }
    __syncthreads();
  }
}

//Radix sort based on NVidia implementation found here:
// http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html
// LSB-first split sort over one block's worth of ints. temp1/temp2 are
// shared scratch arrays of blockDim.x ints.
// NOTE(review): `total` reads temp1/temp2[blockDim.x-1] while the last
// thread may concurrently overwrite temp2[tid] on the next statement
// with no __syncthreads() in between — a potential data race whenever
// blockDim.x exceeds one warp. Confirm against the original NVidia code.
__device__ void radix_sort(int *data, int *temp1, int *temp2) {
  unsigned int tid = threadIdx.x;
  unsigned int total = 0;
  unsigned int b = 0;
  for (unsigned int k=0; k<sizeof(int)*8; ++k) {
    b = (data[tid] & (1 << k)) == 0; //Actually opposite of bit
    temp1[tid] = b;
    temp2[tid] = b;
    __syncthreads();
    block_scan(temp1);
    // exclusive-scan total + last flag = count of zero-bit keys
    total = temp1[blockDim.x-1] + temp2[blockDim.x-1];
    temp2[tid] = tid - temp1[tid] + total;
    temp1[tid] = b ? temp1[tid] : temp2[tid]; //Inverse of NVidia radix, account for b being !bit
    int tmp = data[tid];
    __syncthreads();
    data[temp1[tid]] = tmp;
    __syncthreads();
  }
}

//Radix sort which modifies the key array as well as data array,
//keeping them in the same relative position
// Same algorithm (and same NOTE(review) race caveat) as radix_sort,
// scattering both keys and payloads to the computed destinations.
__device__ void radix_sort_by_key(int *keys, int *data, int *temp1, int *temp2) {
  unsigned int tid = threadIdx.x;
  unsigned int total = 0;
  unsigned int b = 0;
  for (unsigned int k=0; k<sizeof(int)*8; ++k) {
    b = (keys[tid] & (1 << k)) == 0; //Actually opposite of bit
    temp1[tid] = b;
    temp2[tid] = b;
    __syncthreads();
    block_scan(temp1);
    total = temp1[blockDim.x-1] + temp2[blockDim.x-1];
    temp2[tid] = tid - temp1[tid] + total;
    temp1[tid] = b ? temp1[tid] : temp2[tid]; //Inverse of NVidia radix, account for b being !bit
    int tmp_data = data[tid];
    int tmp_key = keys[tid];
    __syncthreads();
    data[temp1[tid]] = tmp_data;
    keys[temp1[tid]] = tmp_key;
    __syncthreads();
  }
}

//Test kernel to make sure utilities work, only used for debugging
// Needs one block of 1024 threads: the shared arrays and the print
// loops below are hard-wired to 1024. (An earlier comment said 256,
// which contradicts the array sizes.)
__global__ void test_kernel(int *test_int_data, short *test_short_data) {
  __shared__ int test_int[1024];
  __shared__ int temp_int_1[1024];
  __shared__ int temp_int_2[1024];
  __shared__ short test_short[1024];
  __shared__ short temp_short[1024];
  __shared__ short out[1];
  unsigned int tid = threadIdx.x;
  // descending values 1024..1 so sorted/scanned output is easy to check
  test_int[tid] = 1024 - tid;
  temp_int_1[tid] = 1024 - tid;
  test_short[tid] = 1024 - tid;
  __syncthreads();
  if (tid == 0) printf("Running test kernel\n");
  max_func(test_short, temp_short, out);
  if (tid == 0) printf("Max: %d\n", out[0]);
  min_func(test_short, temp_short, out);
  if (tid == 0) printf("Min: %d\n", out[0]);
  block_scan(temp_int_1);
  if (tid == 0) {
    printf("Block scan: [");
    for (unsigned int i=0; i<1024; ++i) {
      printf("%d, ", temp_int_1[i]);
    }
    printf("]\n");
  }
  __syncthreads();
  // re-seed and scan a second time to confirm the scan is repeatable
  temp_int_1[tid] = 1024 - tid;
  block_scan(temp_int_1);
  if (tid == 0) {
    printf("Block scan: [");
    for (unsigned int i=0; i<1024; ++i) {
      printf("%d, ", temp_int_1[i]);
    }
    printf("]\n");
  }
  __syncthreads();
  radix_sort(test_int, temp_int_1, temp_int_2);
  if (tid == 0) {
    printf("Radix Sort: [");
    for (unsigned int i=0; i<1024; ++i) {
      printf("%d, ", test_int[i]);
    }
    printf("]\n");
  }
}
22,091
#include <algorithm>
#include <iostream>
#include <vector>
using namespace std;

typedef std::vector<double> vi;
typedef vector<vector<double> > matrix;

// Global CSR-style containers filled by sparesify()/createdense().
vi A;           // non-zero values (later tiled out by extendVec)
vi IA = { 0 };  // row-pointer array
vi JA;          // (de-circulated) column indices
vi DA;          // dense column data gathered via JA
int length;

// Element-wise product c[id] = a[id] * b[id] for the first n entries.
__global__ void multi(double *a, double *b, double *c, int n){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id<n){
        c[id] = a[id]*b[id];
    }
}

// Prints a dense matrix row by row.
void printMatrix(const matrix& M)
{
    int m = M.size();
    int n = M[0].size();
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++)
            cout << M[i][j] << " ";
        cout << endl;
    }
}

// Prints the first m entries of a raw array, prefixed with msg.
void printlist(double* V, char* msg, int m)
{
    cout << msg << "[ ";
    for (int i = 0; i < m; i++) {
        cout << V[i] << " ";
    }
    cout << "]" << endl;
}

// Copies the first N elements of V into the raw buffer C.
// Fixed: the original re-copied the WHOLE vector N times into C, which
// overflowed C whenever V.size() > N (extendVec grows A past DA.size()).
void copyto(vi& V, double* C, int N)
{
    int count = (int)std::min(V.size(), (size_t)N);
    std::copy(V.begin(), V.begin() + count, C);
}

// Prints a vector, prefixed with msg.
void printVector(const vi& V, char* msg)
{
    cout << msg << "[ ";
    for_each(V.begin(), V.end(), [](double a) {
        cout << a << " ";
    });
    cout << "]" << endl;
}

// Converts the per-row column indices in JA to circulant-style offsets
// relative to the row (negative offsets wrap to the right edge).
void decirculate(vi& JA, const matrix& M, const vi& IA)
{
    int rows = M[0].size();
    int itr = IA.size();
    rows = rows-1;
    int i, j;
    for (i = 0; i < itr-1; i++) {
        for (j = IA[i]; j < IA[i+1]; j++) {
            if (JA[j] < i) {
                JA[j] = rows - (i-JA[j]-1);
            } else {
                JA[j] = JA[j]-i;
            }
        }
    }
}

// Appends `num`+1 copies of A's first `size` entries onto A so that
// A has at least as many entries as DA. The extra iteration (<= num)
// and the debug print are preserved from the original.
void extendVec(vi& A, int num, int size)
{
    for (int i = 0; i <= num; i++) {
        cout << num << std::endl;
        for (int j = 0; j < size; j++) {
            A.push_back(A[j]);
        }
    }
}

// Gathers columns of CA selected by JA into the flat DA vector,
// column-major over CA's width.
void createdense(matrix& CA, vi& JA)
{
    int m = JA.size();
    int n = CA[0].size();
    for (int i = 0; i < n; i++) {
        for (int k = 0; k < m; k++) {
            DA.push_back(CA[JA[k]][i]);
        }
    }
}

// Builds the CSR representation (A, IA, JA) of M, de-circulates JA,
// and prints the matrix plus the three arrays.
void sparesify(const matrix& M)
{
    int m = M.size();
    int n = M[0].size(), i, j;
    int dab = 0;
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            if (M[i][j] != 0) {
                A.push_back(M[i][j]);
                JA.push_back(j);
                dab++;
            }
        }
        IA.push_back(dab);
    }
    decirculate(JA, M, IA);
    printMatrix(M);
    cout<<"++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
    printVector(A, (char*)"A = ");
    printVector(IA, (char*)"IA = ");
    printVector(JA, (char*)"JA = ");
    cout<<"++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
}

int main()
{
    double *IN, *in;
    double *OUT, *out;
    double *ANS, *ans;

    matrix M = { { 0, 0, 0, 0, 1 },
                 { 5, 8, 0, 0, 0 },
                 { 0, 0, 3, 0, 0 },
                 { 0, 6, 0, 0, 1 } };

    matrix CA = {{1,1,1,1,1},
                 {2,2,2,2,2},
                 {3,3,3,3,3},
                 {4,4,4,4,4},
                 {5,5,5,5,5}};

    sparesify(M);
    createdense(CA, JA);
    extendVec(A, DA.size()/A.size(), A.size());
    cout << DA.size() << std::endl;
    length = DA.size();
    int size = length*sizeof(double);
    cout << size << std::endl;

    // host buffers
    IN  = (double *)malloc(size);
    OUT = (double *)malloc(size);
    ANS = (double *)malloc(size);

    // device buffers — allocated exactly once (the original called
    // cudaMalloc twice per pointer, leaking the first allocations)
    cudaMalloc((void **) &in, size);
    cudaMalloc((void **) &out, size);
    cudaMalloc((void **) &ans, size);

    printVector(DA, (char*)"DA = ");
    copyto(DA, &IN[0], DA.size());
    copyto(A, &OUT[0], DA.size());
    printlist(&OUT[0], (char*)"Out = ", DA.size());
    printlist(&IN[0], (char*)"IN = ", DA.size());
    cout<<"++++++++++++++++++++++++++++++++++++++++++"<<std::endl;

    cudaMemcpy(in, IN, size, cudaMemcpyHostToDevice);
    cudaMemcpy(out, OUT, size, cudaMemcpyHostToDevice);

    // 32 blocks x 1024 threads covers far more than length elements;
    // the kernel's bound check discards the excess threads.
    multi<<<32, 1024>>>(in, out, ans, DA.size());

    cudaMemcpy(ANS, ans, size, cudaMemcpyDeviceToHost);
    printlist(&ANS[0], (char*)"ANS = ", DA.size());

    free(IN);
    free(OUT);
    free(ANS);
    cudaFree(in);
    cudaFree(out);
    cudaFree(ans);
    return 0;
}
22,092
#include <cuComplex.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Hard-decision BPSK demapper: bit i is 1 iff the real part of the
// i-th symbol is strictly positive. One thread per symbol; extra
// threads fall out of the guard.
__global__ void bpsk_decision_maker(cuFloatComplex *in, uint8_t *out, int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    out[idx] = (cuCrealf(in[idx]) > 0.0f) ? 1 : 0;
}

// Host-side launcher: enqueues the demapper on the given stream with
// the caller-chosen grid/block configuration.
void exec_bpsk_decision_maker(cuFloatComplex *in, uint8_t *out, int n,
                              int grid_size, int block_size,
                              cudaStream_t stream)
{
    bpsk_decision_maker<<<grid_size, block_size, 0, stream>>>(in, out, n);
}
22,093
#include "includes.h" __global__ void kReadRows(float* data, float* target, int num_images, int num_modules, int num_modules_batch, int module_id_offset) { int c = blockIdx.y; int src_module_id = module_id_offset + blockIdx.x; int dst_module_id = blockIdx.x; data += num_images * (src_module_id + c * num_modules); target += num_images * (dst_module_id + c * num_modules_batch); for (int im = threadIdx.x; im < num_images; im += blockDim.x) { target[im] = data[im]; } }
22,094
#include "includes.h" const int N = 32; __global__ void mul(int* A, int* B, int* C){ int col = blockIdx.x * blockDim.x + threadIdx.x; int lig = blockIdx.y * blockDim.y + threadIdx.y; int index = lig * N + col; if (col < N && lig < N){ int inter = 0; for (int i = 0; i<N; ++i){ inter += A[lig*N + i] * B[i*N + col]; } C[index] = inter; } }
22,095
// A simple matrix multiplication program.
// NOTE(review): despite the name TILE_WIDTH, no shared-memory tiling is
// performed — TILE_WIDTH only sets the thread-block dimensions.
#include "stdio.h"
#include "stdlib.h"

#define SIZE 512
#define TILE_WIDTH 16

// kernel: C = A * B for len x len row-major float matrices.
// One thread per output element.
__global__ void MatrixMulKernel(float * A,float * B,float * C,int len)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    // if SIZE is not evenly divisible by TILE_WIDTH then
    // there will be excess threads that do nothing and should
    // not do anything to prevent out-of-bounds errors
    if ( row >= SIZE || col >= SIZE ) return;

    float sum=0.0;
    int i;
    for (i = 0;i < len;i++) {
        sum += A[ row*len + i ] * B[ i*len + col ];
    }
    C[ row*len + col ] = sum;
}

int main(int argc, char ** argv)
{
    size_t size = SIZE*SIZE*sizeof(float);

    // Fixed: the original declared the three matrices as 1 MiB stack
    // arrays each (3 MiB total), which overflows the default stack on
    // most systems; allocate them on the heap instead.
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);

    // initialize host matrices with arbitrary data
    int i;
    for (i=0;i<SIZE*SIZE;i++) {
        h_A[i] = (float)i;
        h_B[i] = (float)SIZE * (float)SIZE - (float)i - 1.00;
        h_C[i] = 0.0;
    }

    // allocate space on device
    float * d_A, * d_B, * d_C;
    cudaMalloc(&d_A,size);
    cudaMalloc(&d_B,size);
    cudaMalloc(&d_C,size);

    //copy data to device
    cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_C,h_C,size,cudaMemcpyHostToDevice);

    dim3 threadsPerBlock(TILE_WIDTH,TILE_WIDTH);              // 2d block
    int blocks = ( SIZE + TILE_WIDTH - 1) / TILE_WIDTH;       // ceil-div
    dim3 blocksPerGrid(blocks,blocks);                        // 2d grid

    // invoke the kernel
    MatrixMulKernel<<< blocksPerGrid, threadsPerBlock >>>(d_A,d_B,d_C,SIZE);

    // copy results back to host (implicitly synchronizes with the kernel)
    cudaMemcpy(h_C,d_C,size,cudaMemcpyDeviceToHost);

    // Free up device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // Fixed: release the host allocations as well.
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
22,096
#include <stdio.h>

/*
 * Enumerates every CUDA device and prints its name plus memory-clock
 * and bus-width figures. Peak bandwidth is derived as
 *   2 * clock(kHz) * (busWidth / 8 bytes) / 1e6   [GB/s]
 * (factor of 2 for double data rate memory).
 *
 * Error-handling notes (kept from the original author):
 *  - every runtime API call returns a cudaError_t worth checking;
 *  - cudaPeekAtLastError() reads the sticky error without clearing it,
 *    cudaGetLastError() reads AND resets it to cudaSuccess;
 *  - cudaDeviceSynchronize() surfaces asynchronous (kernel) errors.
 */
int main()
{
    int nDevices;

    // Decode the return value before trusting nDevices.
    cudaError_t err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess)
        printf("%s\n", cudaGetErrorString(err));

    for (int dev = 0; dev < nDevices; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf("Device Number: %d\n", dev);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    }
}
22,097
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <cstdlib>
#include <chrono>

int n;

// Element-wise vector addition C = A + B.
// Fixed: the kernel now takes the element count and bounds-checks it —
// the grid below rounds UP, so without the guard the last block wrote
// past the end of the arrays whenever n was not a multiple of 1024.
__global__ void vecAdd(double *A, double *B, double *C, int size){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size)
        C[idx] = A[idx] + B[idx];
}

// Recomputes the sum on the CPU, times it, and compares element-wise
// against the GPU result (exact equality is fine here: both sides do
// a single IEEE double addition per element).
bool checkResults(double *A, double *B, double *res_gpu){
    double *res_cpu = new double[n];

    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < n; ++i){
        res_cpu[i] = A[i] + B[i];
    }
    auto end = std::chrono::steady_clock::now();
    std::chrono::duration<double> elapsed_seconds = end-start;
    std::cout << "Time for CPU: " << elapsed_seconds.count() << "s\n";

    int num_of_err = 0;
    bool is_correct = 1;
    for (int i = 0; i < n; ++i){
        if (res_cpu[i] != res_gpu[i]){
            num_of_err++;
            std::cout << "Error in " << i + 1 << " element;\nOn CPU " << res_cpu[i] << ", On GPU " << res_gpu[i] << ";\n";
            is_correct = 0;
        }
    }
    if (is_correct){
        std::cout << "Everything is great, results are equal!" << '\n';
    }
    else{
        std::cout << "There are " << num_of_err << " errors. Hm... maybe we did something wrong..." << '\n';
    }
    delete[] res_cpu;   // fixed: was leaked
    return is_correct;
}

int main(int argc, char* argv[]){
    // Fixed: guard against a missing/invalid size argument instead of
    // reading argv[1] unconditionally.
    if (argc < 2 || sscanf(argv[1], "%d", &n) != 1 || n <= 0){
        std::cout << "Usage: program <number_of_elements>" << '\n';
        return 1;
    }

    size_t bytes = n * sizeof(double);
    double *A = (double*)malloc(bytes);
    double *B = (double*)malloc(bytes);
    double *res = (double*)malloc(bytes);

    char decision;
    std::cout << "Do you want to fill arrays by yourself? (Y/N)" << '\n';
    std::cin >> decision;
    switch(tolower(decision))
    {
        case 'y':
            std::cout << "Please, enter first vector: ";
            for (int i = 0; i < n; ++i){
                std::cin >> A[i];
            }
            std::cout << "Please, enter second vector: ";
            for (int i = 0; i < n; ++i){
                std::cin >> B[i];
            }
            break;
        case 'n':
            for (int i = 0; i < n; ++i){
                A[i] = (double(rand()) / rand()) + rand();
                B[i] = (double(rand()) / rand()) + rand();
            }
            break;
        default:
            std::cout << "You have wrote something wrong." << '\n';
            // Fixed: the original fell through and computed on
            // uninitialized arrays; bail out instead.
            free(A); free(B); free(res);
            return 1;
    }

    double *A_gpu, *B_gpu, *res_gpu;
    cudaMalloc(&A_gpu, bytes);
    cudaMalloc(&B_gpu, bytes);
    cudaMalloc(&res_gpu, bytes);

    cudaMemcpy(A_gpu, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(B_gpu, B, bytes, cudaMemcpyHostToDevice);

    int block_size = 1024;
    int grid_size = (n - 1) / block_size + 1;   // round-up grid

    auto start = std::chrono::steady_clock::now();
    vecAdd<<<grid_size, block_size>>>(A_gpu, B_gpu, res_gpu, n);
    cudaDeviceSynchronize();
    auto end = std::chrono::steady_clock::now();
    std::chrono::duration<double> elapsed_seconds = end-start;
    std::cout << "Time for GPU: " << elapsed_seconds.count() << "s\n";

    cudaMemcpy(res, res_gpu, bytes, cudaMemcpyDeviceToHost);

    cudaFree(A_gpu);
    cudaFree(B_gpu);
    cudaFree(res_gpu);

    checkResults(A, B, res);

    free(A);
    free(B);
    free(res);
}
22,098
#pragma kernel tune(threads_per_block=32, 64, 128, 256, 512, 1024)
#pragma kernel tune(items_per_thread=1, 2, 4, 8)
#pragma kernel set(items_per_block=threads_per_block * items_per_thread)
#pragma kernel problem_size(n)
#pragma kernel block_size(threads_per_block)
#pragma kernel grid_divisor(items_per_block)
#pragma kernel buffers(C[n], A[n], B[n])
#pragma kernel tuning_key("vector_add_" + T)

// Tunable element-wise addition C = A + B: each thread handles
// `items_per_thread` elements, spaced one block-width apart so that
// every pass stays coalesced. Elements beyond n are skipped.
template <typename T, int items_per_thread=1>
__global__ void vector_add(int n, T* C, const T* A, const T* B) {
    // first element owned by this thread
    const int base = blockIdx.x * items_per_thread * blockDim.x + threadIdx.x;

    for (int item = 0; item < items_per_thread; item++) {
        const int idx = base + item * blockDim.x;
        if (idx < n) {
            C[idx] = A[idx] + B[idx];
        }
    }
}
22,099
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

// Skeleton kernel for a Mandelbrot renderer — the body is intentionally
// left unimplemented (assignment stub).
__global__ void mandelKernel() {
    // To avoid error caused by the floating number, use the following pseudo code
    //
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
}

// Host front-end function that allocates the memory and launches the GPU kernel.
// NOTE(review): stub — stepX/stepY are computed but no memory is allocated,
// no kernel is launched, and `img` is never written yet.
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    // Per-pixel step sizes across the viewed region of the complex plane.
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;
}
22,100
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <cuda.h>

// Parallel (GPU) mean filter: each interior pixel becomes the average
// of the window_width x window_width box around it. Border pixels
// (within window_width/2 of an edge) are left untouched.
__global__ void meanFilter(int* imageArray, int* filteredArray, int img_height, int img_width, int window_width){
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    if(x >= window_width/2 && x < (img_width - window_width/2) &&
       y >= window_width/2 && y < (img_height - window_width/2)){
        int sum = 0;
        for(int wy = 0 - window_width/2; wy <= window_width/2; wy++){
            for(int wx = 0 - window_width/2; wx <= window_width/2; wx++){
                sum += imageArray[img_width*(y+wy) + (x+wx)];
            }
        }
        filteredArray[img_width*y + x] = sum / (window_width*window_width);
    }
}

// Reads an 8-bit BMP's pixel data into an int array, returning the
// dimensions through height/width.
// Assumes one byte per pixel and ignores row padding — TODO confirm
// this matches the input file's format.
int* getImageArrayFromBMP(FILE* fptr, int* height, int* width){
    int * imageArray;
    int offset = 0;
    long n;
    fseek(fptr, 10, SEEK_SET);
    fread(&offset, 1, 4, fptr);     // bfOffBits: start of pixel data
    fseek(fptr, 4, SEEK_CUR);
    // Fixed: BITMAPINFOHEADER stores biWidth at byte offset 18 and
    // biHeight at 22 — the original read them in the opposite order.
    fread(width, 1, 4, fptr);
    fread(height, 1, 4, fptr);
    imageArray = (int *) malloc((*height)*(*width)*sizeof(int));
    fseek(fptr, offset, SEEK_SET);
    for(int i=0; i < (*height)*(*width); i++){
        // Fixed: fread fills only the low byte of the 4-byte int, so
        // the int must be zeroed first — malloc'd memory left the
        // upper three bytes as garbage.
        imageArray[i] = 0;
        n = fread(&imageArray[i], 1, 1, fptr);
        if (n != 1) {
            printf("File not found");
        }
    }
    return imageArray;
}

// Sequential (CPU) reference implementation of the same mean filter.
void meanFilter_h(int* sourceArray, int* filteredArray, int height, int width, int window_width){
    for(int y = window_width/2; y < height - (window_width/2); y++){
        for(int x = window_width/2; x < width - (window_width/2); x++){
            int sum = 0;
            for(int wy = 0 - window_width/2; wy <= window_width/2; wy++){
                for(int wx = 0 - window_width/2; wx <= window_width/2; wx++){
                    sum += sourceArray[width*(y+wy) + (x+wx)];
                }
            }
            filteredArray[width*y + x] = sum / (window_width*window_width);
        }
    }
}

int main(int argc, char **argv){
    int *sourceImage, *filteredImage;
    int height, width;
    int window_width = 5;
    printf("Mean filter program\n");

    FILE *fptr;
    // Fixed: open in binary mode — text mode corrupts BMP bytes on
    // platforms that translate line endings.
    fptr = fopen("puppy_1280.bmp", "rb");
    if(fptr == NULL)
    {
        printf("Error!");
        exit(1);
    }
    sourceImage = getImageArrayFromBMP(fptr, &height, &width);
    fclose(fptr);

    filteredImage = (int *) malloc((height)*(width)*sizeof(int));

    // CPU reference run, timed with clock()
    clock_t start_h = clock();
    meanFilter_h(sourceImage, filteredImage, height, width, window_width);
    clock_t end_h = clock();
    double time_h = (double)(end_h - start_h)/CLOCKS_PER_SEC;

    int* d_image;
    int* d_filteredImage;
    int* h_filteredImage;
    h_filteredImage = (int *) malloc(height*width*sizeof(int));
    for(int i = 0; i < height*width; i++){
        h_filteredImage[i] = 0;
    }
    cudaMalloc((void **)&d_image, height*width*sizeof(int));
    cudaMalloc((void **)&d_filteredImage, height*width*sizeof(int));
    cudaMemcpy(d_image, sourceImage, height*width*sizeof(int), cudaMemcpyHostToDevice);

    dim3 threadsPerBlock(32,32);
    // round-up grid so every pixel gets a thread
    dim3 numBlocks(1 + ((width-1)/threadsPerBlock.x), 1 + ((height-1)/threadsPerBlock.y));

    clock_t start_d = clock();
    meanFilter<<<numBlocks, threadsPerBlock>>>(d_image, d_filteredImage, height, width, window_width);
    // Fixed: cudaThreadSynchronize() is deprecated; use the modern call.
    cudaDeviceSynchronize();
    clock_t end_d = clock();
    double time_d = (double)(end_d - start_d)/CLOCKS_PER_SEC;

    cudaMemcpy(h_filteredImage, d_filteredImage, height*width*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_image);
    cudaFree(d_filteredImage);

    printf("For %dx%d image and window size %d, CPU time %f is GPU time %f\n", height, width, window_width, time_h, time_d);

    free(filteredImage);
    free(sourceImage);
    free(h_filteredImage);
    return 0;
}