serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
21,901
#include "includes.h" __device__ int glb_hist[COLORS]; __global__ void calc_histogram(unsigned char * img_in, int offset_start, int offset_end){ int ix = blockIdx.x * blockDim.x + threadIdx.x;; const int gridW = gridDim.x * blockDim.x; int Row, pos; __shared__ int hist[COLORS]; if (threadIdx.x < COLORS) { hist[threadIdx.x] = 0; } __syncthreads(); int RowNum = (offset_end - offset_start) / gridW; int extras = (offset_end - offset_start) % gridW; for (Row=0; Row<RowNum; Row++) { pos = Row * gridW + ix; atomicAdd(&hist[img_in[pos + offset_start]],1); } if (extras && ix < extras) { pos = Row * gridW + ix; atomicAdd(&hist[img_in[pos + offset_start]],1); } __syncthreads(); if (threadIdx.x < COLORS) { atomicAdd(&glb_hist[threadIdx.x],hist[threadIdx.x]); } } __global__ void calc_histogram(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin){ int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; const int gridW = gridDim.x * blockDim.x; int img_position = iy * gridW + ix; //thesh mesa sthn eikona synarthsh tou gridW // __shared__ hist_out[nbr_bin]; if (img_position < nbr_bin) { //allagh tou img_position me threadIdx.x se auto to if anevazei poly to contrast hist_out[img_position] = 0; } __syncthreads(); if(img_position < img_size){ atomicAdd(&hist_out[img_in[img_position]],1); } __syncthreads(); }
21,902
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <string>
#include <vector>
#include <set>

using namespace std;

// Split `str` on any character contained in `delimiters`.
// Consecutive delimiters yield empty components and the trailing segment is
// always appended (so "a/b/" -> {"a", "b", ""}).
vector<string> splitpath( const string& str, const set<char> delimiters)
{
    vector<string> result;
    char const* pch = str.c_str();
    char const* start = pch;
    for (; *pch; ++pch) {
        if (delimiters.find(*pch) != delimiters.end()) {
            if (start != pch) {
                string str(start, pch);
                result.push_back(str);
            } else {
                result.push_back("");
            }
            start = pch + 1;
        }
    }
    result.push_back(start);
    return result;
}

// Print (but do not abort on) any CUDA runtime error.
#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of cudaError_t error */ \
  do { \
    cudaError_t error = condition; \
    if (error != cudaSuccess) { \
      cout << cudaGetErrorString(error) << endl; \
    } \
  } while (0)

// Sum each column (pixel) across the nrow images, one thread per column.
__global__ void kernel_colSum(float *r_in, float *g_in, float *b_in,
                              float *r_result, float *g_result, float *b_result,
                              int nrow, int ncol)
{
    int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
    if (colIdx < ncol) {
        float sum_r = 0;
        float sum_g = 0;
        float sum_b = 0;
        for (int k = 0; k < nrow; k++) {
            sum_r += r_in[colIdx + ncol * k];
            sum_g += g_in[colIdx + ncol * k];
            sum_b += b_in[colIdx + ncol * k];
        }
        r_result[colIdx] = sum_r;
        g_result[colIdx] = sum_g;
        b_result[colIdx] = sum_b;
    }
}

// Divide each channel by the number of images nImg to obtain the mean image.
__global__ void kernel_colDiv(float *r_out, float *g_out, float *b_out, int nImg, int size)
{
    int Idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (Idx < size) {
        r_out[Idx] = r_out[Idx] / nImg;
        g_out[Idx] = g_out[Idx] / nImg;
        b_out[Idx] = b_out[Idx] / nImg;
    }
}

// Reads L images of M*N pixels (R, G, B planes) from the file named in
// argv[1], averages them on the GPU, then writes the mean image and timing
// information to result files.
int main(int argc, char *argv[])
{
    string input_file_name;
    if (argc > 1) {
        input_file_name = argv[1];
    }

    ifstream infile;
    infile.open(input_file_name.c_str());
    if (!infile.is_open()) {
        // Fail fast instead of silently reading garbage dimensions below.
        cout << "Cannot open input file: " << input_file_name << endl;
        return 1;
    }

    int L, M, N, rows, cols, total_pixels;
    float *r_host, *g_host, *b_host, *r_out_host, *g_out_host, *b_out_host;
    float *r_dev, *g_dev, *b_dev, *r_out_dev, *g_out_dev, *b_out_dev;

    infile >> L >> M >> N;
    rows = L;        // one row per image
    cols = M * N;    // one column per pixel
    total_pixels = rows * cols;

    // Allocating matrices
    r_host = (float *)malloc(total_pixels * sizeof(float));
    g_host = (float *)malloc(total_pixels * sizeof(float));
    b_host = (float *)malloc(total_pixels * sizeof(float));
    r_out_host = (float *)malloc(cols * sizeof(float));
    g_out_host = (float *)malloc(cols * sizeof(float));
    b_out_host = (float *)malloc(cols * sizeof(float));

    // Initialize the output buffers with zeros.  (The original filled them
    // with 0.5 under a comment claiming "zeros" and that calloc "doesn't
    // work with floats" — calloc zeroes floats fine; either way these values
    // are overwritten by the device results below.)
    for (int j = 0; j < cols; j++) {
        r_out_host[j] = 0.0f;
        g_out_host[j] = 0.0f;
        b_out_host[j] = 0.0f;
    }

    // Reading matrix: for each image, a full R plane, then G, then B.
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) infile >> r_host[i * cols + j];
        for (int j = 0; j < cols; j++) infile >> g_host[i * cols + j];
        for (int j = 0; j < cols; j++) infile >> b_host[i * cols + j];
    }

    cudaEvent_t ct1, ct2;
    float dt;
    CUDA_CHECK(cudaMalloc((void**)&r_dev, total_pixels * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void**)&g_dev, total_pixels * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void**)&b_dev, total_pixels * sizeof(float)));

    // Input matrix of images
    CUDA_CHECK(cudaMemcpy(r_dev, r_host, total_pixels * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(g_dev, g_host, total_pixels * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(b_dev, b_host, total_pixels * sizeof(float), cudaMemcpyHostToDevice));

    // Output image
    CUDA_CHECK(cudaMalloc((void**)&r_out_dev, cols * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void**)&g_out_dev, cols * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void**)&b_out_dev, cols * sizeof(float)));

    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);

    int grid_size, block_size = 256;
    // Both kernels use one thread per column, so covering `cols` threads is
    // enough.  (The original launched ceil(L*M*N / block_size) blocks —
    // `rows` times more than needed; the excess threads only failed the
    // bounds check.)
    grid_size = (cols + block_size - 1) / block_size;
    kernel_colSum<<<grid_size, block_size>>>(r_dev, g_dev, b_dev,
                                             r_out_dev, g_out_dev, b_out_dev,
                                             rows, cols);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    float aux_dt = dt;
    float duration;

    cudaEventRecord(ct1);
    kernel_colDiv<<<grid_size, block_size>>>(r_out_dev, g_out_dev, b_out_dev, L, cols);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    cudaDeviceSynchronize();
    duration = dt + aux_dt;
    std::cout << "Tiempo GPU: " << duration << "[ms]" << std::endl;

    CUDA_CHECK(cudaMemcpy(r_out_host, r_out_dev, cols * sizeof(float), cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(g_out_host, g_out_dev, cols * sizeof(float), cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(b_out_host, b_out_dev, cols * sizeof(float), cudaMemcpyDeviceToHost));

    // The per-channel division by L already happened on the device in
    // kernel_colDiv, so no host-side division is needed here.
    set<char> delims{'/'};
    vector<string> path = splitpath(input_file_name, delims);

    // Append the timing result for this input file.
    ofstream times_file;
    times_file.open("results_cuda.txt", ios_base::app);
    times_file << path.back() << " " << duration << "[ms]" << endl;

    // Write the mean image: header, then one line each for R, G and B.
    ofstream result_file;
    result_file.open("result_cuda_" + path.back());
    result_file << M << " " << N << endl;
    for (int j = 0; j < cols - 1; j++) result_file << r_out_host[j] << " ";
    result_file << r_out_host[cols - 1] << endl;
    for (int j = 0; j < cols - 1; j++) result_file << g_out_host[j] << " ";
    result_file << g_out_host[cols - 1] << endl;
    for (int j = 0; j < cols - 1; j++) result_file << b_out_host[j] << " ";
    result_file << b_out_host[cols - 1];

    CUDA_CHECK(cudaFree(r_dev));
    CUDA_CHECK(cudaFree(g_dev));
    CUDA_CHECK(cudaFree(b_dev));
    CUDA_CHECK(cudaFree(r_out_dev));
    CUDA_CHECK(cudaFree(g_out_dev));
    CUDA_CHECK(cudaFree(b_out_dev));
    cudaEventDestroy(ct1);   // events were leaked in the original
    cudaEventDestroy(ct2);
    free(r_host);
    free(g_host);
    free(b_host);
    free(r_out_host);
    free(g_out_host);
    free(b_out_host);
    times_file.close();
    infile.close();
    result_file.close();
    return 0;
}
21,903
// Compile with:
//
//   nvcc -gencode arch=compute_50,code=compute_50 -rdc true -ptx jitlink.cu
//
// using the oldest supported toolkit version (10.2 at the time of writing).

// Doubles `a` and writes the result through `out`; always returns 0.
extern "C" __device__ int bar(int *out, int a)
{
    *out = a * 2;
    return 0;
}

// The out argument is necessary due to Numba's CUDA calling convention, which
// always reserves the first parameter for a pointer to a returned value, even
// if there is no return value.
// Copies a[1] into a[0]; always returns 0 (the status code).
extern "C" __device__ int array_mutator(void *out, int *a)
{
    a[0] = a[1];
    return 0;
}
21,904
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Blocked (tiled) Floyd-Warshall all-pairs shortest paths on the GPU.

const int INF = (1 << 30) - 1;   // "no edge"; chosen so INF + INF does not overflow int

int vertex_num, edge_num, matrix_size;
int *dist;   // pinned host copy of the padded distance matrix

// Elapsed seconds between two CLOCK_MONOTONIC timestamps.
double cal_time(struct timespec start, struct timespec end)
{
    struct timespec temp;
    if ((end.tv_nsec - start.tv_nsec) < 0) {
        temp.tv_sec = end.tv_sec - start.tv_sec - 1;
        temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
    } else {
        temp.tv_sec = end.tv_sec - start.tv_sec;
        temp.tv_nsec = end.tv_nsec - start.tv_nsec;
    }
    return temp.tv_sec + (double)temp.tv_nsec / 1000000000.0;
}

// Row-major 2-D -> 1-D index.  The multiplication is widened to size_t
// first: the original `i * row_size + j` overflowed int for matrices larger
// than ~46341 x 46341.
__device__ __host__ size_t index_convert(int i, int j, int row_size)
{
    return (size_t)i * row_size + j;
}

// Read the binary graph (vertex count, edge count, then (src, dst, weight)
// triples) and build the adjacency matrix, padded up to a multiple of
// block_factor, in pinned host memory.
void input(char *input_file_path, int &block_factor)
{
    FILE *input_file = fopen(input_file_path, "rb");
    if (input_file == NULL) {
        fprintf(stderr, "cannot open input file %s\n", input_file_path);
        exit(1);
    }
    fread(&vertex_num, sizeof(int), 1, input_file);
    fread(&edge_num, sizeof(int), 1, input_file);
    // Pad so every tile of the blocked algorithm is full.
    matrix_size = ceil((double)vertex_num / (double)block_factor) * block_factor;
    cudaMallocHost((void **)&dist, (size_t)matrix_size * matrix_size * sizeof(int));
    for (int i = 0; i < matrix_size; ++i) {
        for (int j = 0; j < matrix_size; ++j) {
            if (i != j)
                dist[index_convert(i, j, matrix_size)] = INF;
            else if (i < vertex_num)
                dist[index_convert(i, j, matrix_size)] = 0;     // real vertex: dist to self is 0
            else
                dist[index_convert(i, j, matrix_size)] = INF;   // padding vertex
        }
    }
    int data[3];
    for (int i = 0; i < edge_num; ++i) {
        fread(data, sizeof(int), 3, input_file);
        dist[index_convert(data[0], data[1], matrix_size)] = data[2];
    }
    fclose(input_file);
}

// Write the top-left vertex_num x vertex_num corner of the padded matrix.
void output(char *output_file_path)
{
    FILE *output_file = fopen(output_file_path, "w");
    for (int i = 0; i < vertex_num; ++i) {
        fwrite(&dist[index_convert(i, 0, matrix_size)], sizeof(int), vertex_num, output_file);
    }
    fclose(output_file);
}

__constant__ int size[3];   // [0] matrix size, [1] block_factor, [2] grid_size

// Phase 1: relax the pivot tile (round, round) entirely in shared memory.
// Launch: <<<1, dim3(bf, bf)>>>.  The tile is stored transposed
// (share[j][i]); shared array holds up to 64x64 ints (bf <= 32 in practice).
__global__ void phase1(int *d_dist, int round)
{
    __shared__ int share[4 * 1024];
    int i = threadIdx.y;
    int j = threadIdx.x;
    int i_offset = size[1] * round;
    int j_offset = size[1] * round;
    share[index_convert(j, i, size[1])] =
        d_dist[index_convert(i_offset + i, j_offset + j, size[0])];
#pragma unroll 32
    for (int k = 0; k < size[1]; ++k) {
        __syncthreads();
        if (share[index_convert(j, i, size[1])] >
            share[index_convert(j, k, size[1])] + share[index_convert(k, i, size[1])])
            share[index_convert(j, i, size[1])] =
                share[index_convert(j, k, size[1])] + share[index_convert(k, i, size[1])];
    }
    d_dist[index_convert(i_offset + i, j_offset + j, size[0])] =
        share[index_convert(j, i, size[1])];
}

// Phase 2: relax the tiles in the pivot row (blockIdx.x == 1) and pivot
// column (blockIdx.x == 0).  Launch: <<<dim3(2, grid-1), dim3(bf, bf)>>>.
// share layout: [0] the target tile, [1] the row operand, [2] the column operand.
__global__ void phase2(int *d_dist, int round)
{
    __shared__ int share[3 * 4 * 1024];
    int i = threadIdx.y;
    int j = threadIdx.x;
    int i_offset, j_offset;
    if (blockIdx.x == 0) {
        // Tile in the pivot column.
        i_offset = size[1] * ((round + blockIdx.y + 1) % size[2]);
        j_offset = size[1] * round;
        share[index_convert(i, j, size[1])] =
            d_dist[index_convert(i_offset + i, j_offset + j, size[0])];
        share[index_convert(i + size[1], j, size[1])] =
            share[index_convert(i, j, size[1])];
        share[index_convert(i + 2 * size[1], j, size[1])] =
            d_dist[index_convert(j_offset + i, j_offset + j, size[0])];
    } else {
        // Tile in the pivot row.
        i_offset = size[1] * round;
        j_offset = size[1] * ((round + blockIdx.y + 1) % size[2]);
        share[index_convert(i, j, size[1])] =
            d_dist[index_convert(i_offset + i, j_offset + j, size[0])];
        share[index_convert(i + size[1], j, size[1])] =
            d_dist[index_convert(i_offset + i, i_offset + j, size[0])];
        share[index_convert(i + 2 * size[1], j, size[1])] =
            share[index_convert(i, j, size[1])];
    }
#pragma unroll 32
    for (int k = 0; k < size[1]; ++k) {
        __syncthreads();
        if (share[index_convert(i, j, size[1])] >
            share[index_convert(i + size[1], k, size[1])] +
            share[index_convert(k + 2 * size[1], j, size[1])])
            share[index_convert(i, j, size[1])] =
                share[index_convert(i + size[1], k, size[1])] +
                share[index_convert(k + 2 * size[1], j, size[1])];
    }
    d_dist[index_convert(i_offset + i, j_offset + j, size[0])] =
        share[index_convert(i, j, size[1])];
}

// Phase 3: relax every remaining tile against the pivot row and column.
// Launch: <<<dim3(grid-1, grid-1), dim3(bf, bf)>>>.
__global__ void phase3(int *d_dist, int round)
{
    __shared__ int share[3 * 4 * 1024];
    int i = threadIdx.y;
    int j = threadIdx.x;
    int i_offset = size[1] * ((round + blockIdx.y + 1) % size[2]);
    int j_offset = size[1] * ((round + blockIdx.x + 1) % size[2]);
    int r_offset = size[1] * round;
    share[index_convert(i, j, size[1])] =
        d_dist[index_convert(i_offset + i, j_offset + j, size[0])];
    share[index_convert(i + size[1], j, size[1])] =
        d_dist[index_convert(i_offset + i, r_offset + j, size[0])];
    share[index_convert(i + 2 * size[1], j, size[1])] =
        d_dist[index_convert(r_offset + i, j_offset + j, size[0])];
#pragma unroll 32
    for (int k = 0; k < size[1]; ++k) {
        __syncthreads();
        if (share[index_convert(i, j, size[1])] >
            share[index_convert(i + size[1], k, size[1])] +
            share[index_convert(k + 2 * size[1], j, size[1])])
            share[index_convert(i, j, size[1])] =
                share[index_convert(i + size[1], k, size[1])] +
                share[index_convert(k + 2 * size[1], j, size[1])];
    }
    d_dist[index_convert(i_offset + i, j_offset + j, size[0])] =
        share[index_convert(i, j, size[1])];
}

// argv[1] = input graph, argv[2] = output file, argv[3] (optional) = block factor.
int main(int argc, char **argv)
{
    double total_time, bfd_time;
    struct timespec total_time1, total_time2, bfd_time1, bfd_time2;
    clock_gettime(CLOCK_MONOTONIC, &total_time1);
    cudaSetDevice(0);

    int block_factor = 32;
    if (argc == 4) block_factor = atoi(argv[3]);
    // A block is block_factor^2 threads (hardware limit 1024), so values
    // outside [1, 32] would exceed the launch limit / overrun the shared
    // tiles; clamp instead of corrupting memory.
    if (block_factor < 1 || block_factor > 32) {
        fprintf(stderr, "block factor %d out of range [1,32], using 32\n", block_factor);
        block_factor = 32;
    }
    input(argv[1], block_factor);

    int grid_size = matrix_size / block_factor;
    int size_info[3] = {matrix_size, block_factor, grid_size};
    cudaMemcpyToSymbol(size, size_info, 3 * sizeof(int));

    int *d_dist;
    clock_gettime(CLOCK_MONOTONIC, &bfd_time1);
    cudaMalloc(&d_dist, (size_t)sizeof(int) * matrix_size * matrix_size);
    cudaMemcpy(d_dist, dist, (size_t)sizeof(int) * matrix_size * matrix_size,
               cudaMemcpyHostToDevice);

    dim3 block(block_factor, block_factor);
    dim3 grid2(2, grid_size - 1);
    dim3 grid3(grid_size - 1, grid_size - 1);
    // One round per pivot tile: pivot (phase1), pivot row/column (phase2),
    // everything else (phase3).  With a single tile there is no row/column,
    // and the original's zero-sized grid2/grid3 launches were invalid.
    for (int r = 0; r < grid_size; ++r) {
        phase1<<<1, block>>>(d_dist, r);
        if (grid_size > 1) {
            phase2<<<grid2, block>>>(d_dist, r);
            phase3<<<grid3, block>>>(d_dist, r);
        }
    }
    cudaMemcpy(dist, d_dist, (size_t)sizeof(int) * matrix_size * matrix_size,
               cudaMemcpyDeviceToHost);
    clock_gettime(CLOCK_MONOTONIC, &bfd_time2);
    output(argv[2]);
    cudaFree(d_dist);
    // dist was allocated with cudaMallocHost, so it must be released with
    // cudaFreeHost — the original called cudaFree, which is invalid for
    // pinned host memory.
    cudaFreeHost(dist);
    clock_gettime(CLOCK_MONOTONIC, &total_time2);
    bfd_time = cal_time(bfd_time1, bfd_time2);
    total_time = cal_time(total_time1, total_time2);
    printf(" vertex: %d\n", vertex_num);
    printf(" I/O time: %.5f\n", total_time - bfd_time);
    printf(" cal time: %.5f\n", bfd_time);
    printf(" runtime: %.5f\n", total_time);
    return 0;
}
21,905
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <time.h> #include <cmath> #include <device_functions.h> #define N 1048576 #define THREADS_PER_BLOCK 1024 #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } else { \ printf("success\n"); \ } \ } while (0) #pragma once #ifdef __INTELLISENSE__ void __syncthreads(); //Just so sync threads doesnt get underlined in red int atomicAdd(int* address, int val); #endif typedef struct Node { int sum; int fromLeft; }Node; void randomGen(int *input, int size) { srand(time(NULL)); for (int i = 0; i < size; i++) { input[i] = rand() % 100; } } __device__ void downPass(Node *nodeArray, int count) { int index = threadIdx.x + blockIdx.x * blockDim.x; //set root node if (index%N == 0) { nodeArray[0].fromLeft = 0; } if (index%(N) == 0) { nodeArray[1].fromLeft = nodeArray[0].fromLeft; nodeArray[2].fromLeft = nodeArray[0].fromLeft + nodeArray[1].sum; } __syncthreads(); int divisor = N / 2; for (int i = 1; i < count; i++) { if (index%divisor == 0) { //int random = i - 1; //int leftIndex = power(2, i) + index / divisor + 2 * random + 1; int leftIndex = 2 * i + 1; nodeArray[leftIndex].fromLeft = nodeArray[leftIndex / 2].fromLeft;//left child nodeArray[leftIndex + 1].fromLeft = nodeArray[leftIndex / 2].fromLeft + nodeArray[leftIndex].sum; divisor /= 2; } __syncthreads(); } } //Tree builds!!!! 
__device__ void buildTree(int *input, Node *nodeArray, int *sum, int count) { int index = threadIdx.x + blockIdx.x * blockDim.x; nodeArray[index + (N-1)].sum = input[index]; //Save all the leaf nodes int divisor = 2; for (int i = 0; i < count; i++) { //Generate all of the parent nodes if (index % divisor == 0) { nodeArray[(index+(N-1))/divisor].sum = nodeArray[(index+(N-1))/(divisor/2)].sum + nodeArray[(index + N)/(divisor/2)].sum; divisor *= 2; } __syncthreads(); } } __global__ void find_repeats(int *a, int *b, int n) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < n - 1) { if (a[index] == a[index + 1]) { b[index] = 1; } else { b[index] = 0; } } } __global__ void exclusive_scan(int *input, int *output, Node *nodeArray, int *totalSum, int iterations) { //int index = threadIdx.x + blockIdx.x * blockDim.x; buildTree(input, nodeArray, totalSum, iterations); __syncthreads(); downPass(nodeArray, iterations); } //In order to implement this we need a working version of exclusive scan __global__ void index_repeats(int *b, int *c, int *B) { } int main() { int *a, *b, *c; //host ABC int *dev_a, *dev_b, *dev_A; int *totalSum; int sum = 0; //int *dev_c, *dev_B; int size = N * sizeof(int); Node *nodeArray, *dev_nodeArray; int nodeArraySize = (2 * N - 1) * sizeof(Node); //cudamalloc a, b, and c on device memory cudaMalloc((void**)&dev_a, size); cudaMalloc((void**)&dev_b, size); cudaMalloc((void**)&dev_A, size); cudaMalloc((void**)&totalSum, sizeof(int)); cudaMalloc((void**)&dev_nodeArray, nodeArraySize); //cudaMalloc((void**)&dev_c, size); //cudaMalloc((void**)&dev_B, size); a = (int *)malloc(size); b = (int *)malloc(size); c = (int *)malloc(size); nodeArray = (Node *)malloc(nodeArraySize); //Make random array randomGen(a, N); cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice); //This works properly find_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_b, N); //Do exclusive scan on dev_b, store in dev_c //exclusive_scan <<< N / 
THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c); int iterations = log(N) / log(2); //Cannot be called from the device exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_A, dev_nodeArray, totalSum, iterations); //Create B and C with some cuda operations on dev_c //need to implement exclusive scan on dev_b in order to do index repeats //index_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c, dev_B); cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost); //cudaCheckErrors("failed"); //cudaMemcpy(a, dev_A, size, cudaMemcpyDeviceToHost); cudaMemcpy(&sum, totalSum, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(nodeArray, dev_nodeArray, nodeArraySize, cudaMemcpyDeviceToHost); //remove repeats cannot be done in parallel int j = 0; for (int i = 0; i < N; i++) { if (b[i] == 0) { c[j] = a[i]; j++; } } printf("Last element of find_repeats: %d\n", c[N-j]); //replace cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_A); cudaFree(totalSum); cudaFree(dev_nodeArray); //cudaFree(dev_c); free(a); free(b); free(c); free(nodeArray); return 0; }
21,906
#include "includes.h" #define BLOCK_SIZE 1024 #ifndef RADIUS #define RADIUS 3 #endif #ifndef ITERS #define ITERS 100 #endif #ifndef USE_L2 #define USE_L2 false #endif __global__ void stencil_no_shared(int *in, int *out) { int temp[BLOCK_SIZE + 2 * RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex+RADIUS]; if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = in[gindex]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE + RADIUS]; } __syncthreads(); // Apply the stencil int result = 0; for (int offset = -RADIUS ; offset <= RADIUS ; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; }
21,907
//This program checks if there is a CUDA capable graphics card
//and selects the best one
#include <stdio.h>
#include <stdlib.h>

// Returns true when the device's compute capability (devProp.major.minor)
// is at least specMajor.specMinor.
bool matchSpecs(cudaDeviceProp devProp, int specMajor, int specMinor)
{
    // Lexicographic comparison of (major, minor) version pairs; equivalent
    // to the original nested-if ladder.
    if (devProp.major != specMajor) {
        return devProp.major > specMajor;
    }
    return devProp.minor >= specMinor;
}

// Counts the CUDA devices and, for the single-device case, verifies the
// device meets the minimum compute capability.  Returns false when no
// suitable device is present.
bool chooseBestDevice(int specMajor, int specMinor)
{
    int devCount;
    cudaDeviceProp devProp;
    bool match;

    // Check the API result: without a CUDA driver the original left
    // devCount uninitialized and read garbage.
    if (cudaGetDeviceCount(&devCount) != cudaSuccess) {
        printf("No CUDA capable cards found\n");
        return false;
    }
    printf("There are %d CUDA device(s)\n", devCount);

    if (devCount == 0) {
        printf("No CUDA capable cards found\n");
        return false;
    } else if (devCount == 1) {
        printf("One CUDA capable card found\n");
        cudaGetDeviceProperties(&devProp, 0);
        //make sure card matches minimum specifications
        printf("%s has capability %d.%d\n", devProp.name, devProp.major, devProp.minor);
        printf("Minimum Capability: %d.%d\n", specMajor, specMinor);
        match = matchSpecs(devProp, specMajor, specMinor);
        if (match) {
            printf("%s matches specifications\n", devProp.name);
            return true;
        } else {
            printf("%s does not match specifications\n", devProp.name);
            return false;
        }
    } else {
        //TO DO: rank all devices and choose the best one; accept for now.
        /*cudaDeviceProp devPropArray[devCount];
        //get device properties cudaGetDeviceProperties
        for (int i = 0; i < devCount; i++) {
            cudaGetDeviceProperties(&devProp, i);
            devPropArray[i] = devProp;
        }
        //rank and choose a card*/
        return true;
    }
}

//Arguments are the minimum Major and Minor compute capability.
int main(int argc, char** argv)
{
    if (argc != 3) {
        printf("need major and minor version\n");
        exit(0);
    }
    int major = strtol(argv[1], NULL, 10);
    int minor = strtol(argv[2], NULL, 10);
    chooseBestDevice(major, minor);
    return 0;
}
21,908
#include <cuda.h> #include <iostream> #include <stdio.h> using namespace std; #define cudaCheck(error) \ if (error != cudaSuccess) { \ printf("Fatal error: %s at %s:%d\n", \ cudaGetErrorString(error), \ __FILE__, __LINE__); \ exit(1); \ } __global__ void cudawbfs(int *distance, unsigned int *row_ptr, int *col_ind, int nov, int *improvement, int level) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int localImprovement = 0; if(tid < nov && distance[tid] == level) { for(int e = row_ptr[tid]; e < row_ptr[tid + 1]; e++){ int adj = col_ind[e]; if(distance[adj] < 0){ distance[adj] = level + 1; localImprovement = 1; } } } if(localImprovement) { (*improvement) = localImprovement; } } void wbfs(unsigned int * row_ptr, int * col_ind, int * distance, int nov, int * d_distance, unsigned int * d_row_ptr, int * d_col_ind){ //initializations int size_of_rowptr = (nov + 1) * sizeof(int); int size_of_colind = row_ptr[nov] * sizeof(int); int *d_improvement, *d_nov, *d_level; //memory allocations cudaCheck(cudaMalloc((void**) &d_improvement, sizeof(int))); cudaCheck(cudaMalloc((void**) &d_nov, sizeof(int))); cudaCheck(cudaMalloc((void**) &d_level, sizeof(int))); cudaCheck(cudaMalloc((void**) &d_row_ptr, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_distance, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_col_ind, size_of_colind)); //memory copies cudaCheck(cudaMemcpy(d_distance, distance, size_of_rowptr, cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_row_ptr, row_ptr, size_of_rowptr, cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_col_ind, col_ind, size_of_colind, cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_nov, &nov, sizeof(int), cudaMemcpyHostToDevice)); //start time cudaEvent_t start; cudaEvent_t stop; cudaCheck(cudaEventCreate(&start)); cudaCheck(cudaEventCreate(&stop)); cudaCheck(cudaEventRecord(start, 0)); int *improvement = new int; int level = 1; do{ (*improvement) = 0; cudaCheck(cudaMemcpy(d_improvement, improvement, sizeof(int), 
cudaMemcpyHostToDevice)); cudawbfs<<<(nov + 1023) / 1024, 1024>>>(d_distance, d_row_ptr, d_col_ind, nov, d_improvement, level); cudaCheck(cudaMemcpy(improvement, d_improvement, sizeof(int), cudaMemcpyDeviceToHost)); level++; } while((*improvement) == 1); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsed; cudaEventElapsedTime(&elapsed, start, stop); //take value again cudaCheck(cudaMemcpy(distance, d_distance, size_of_rowptr, cudaMemcpyDeviceToHost)); //deallocations cudaCheck(cudaFree(d_row_ptr)); cudaCheck(cudaFree(d_distance)); cudaCheck(cudaFree(d_col_ind)); printf("GPU WBFS time: %f s\n", elapsed / 1000); } __global__ void cudaqbfs(int *distance, unsigned int *row_ptr, int *col_ind, int *queue, int *nextQueue, int size, int *nextSize, int level) { int index, u, v, tid = threadIdx.x + blockDim.x * blockIdx.x; if(tid < size) { u = queue[tid]; for(int e = row_ptr[u]; e < row_ptr[u + 1]; e++) { v = col_ind[e]; if (distance[v] == -1) { distance[v] = level + 1; index = atomicAdd(nextSize, 1); nextQueue[index] = v; } } } } void qbfs(unsigned int *row_ptr, int *col_ind, int *distance, int nov, int source) { int srcNeigh = row_ptr[source + 1] - row_ptr[source]; int *srcArr = new int[srcNeigh]; int index = 0; for (int i = row_ptr[source]; i < row_ptr[source + 1]; i++) { if (distance[col_ind[i]] == 1) { srcArr[index++] = col_ind[i]; } } int size_of_rowptr = (nov + 1) * sizeof(int); int size_of_colind = row_ptr[nov] * sizeof(int); unsigned int *d_row_ptr; int *d_col_ind, *d_distance, *d_queue, *d_nextQueue, *d_nextSize; cudaCheck(cudaMalloc((void**) &d_row_ptr, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_col_ind, size_of_colind)); cudaCheck(cudaMalloc((void**) &d_distance, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_queue, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_nextQueue, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_nextSize, sizeof(int))); cudaCheck(cudaMemcpy(d_distance, distance, size_of_rowptr, cudaMemcpyHostToDevice)); 
cudaCheck(cudaMemcpy(d_row_ptr, row_ptr, size_of_rowptr, cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_col_ind, col_ind, size_of_colind, cudaMemcpyHostToDevice)); //cudaCheck(cudaMemcpy(d_queue, &source, sizeof(int), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_queue, srcArr, srcNeigh * sizeof(int), cudaMemcpyHostToDevice)); cudaEvent_t start; cudaEvent_t stop; cudaCheck(cudaEventCreate(&start)); cudaCheck(cudaEventCreate(&stop)); cudaCheck(cudaEventRecord(start, 0)); int size = srcNeigh; int *nextSize = new int; *nextSize = 0; int level = 1; do { cudaCheck(cudaMemcpy(d_nextSize, nextSize, sizeof(int), cudaMemcpyHostToDevice)); cudaqbfs<<<(size + 1023) / 1024, 1024>>>(d_distance, d_row_ptr, d_col_ind, d_queue, d_nextQueue, size, d_nextSize, level); cudaCheck(cudaMemcpy(nextSize, d_nextSize, sizeof(int), cudaMemcpyDeviceToHost)); level++; size = *nextSize; *nextSize = 0; swap(d_queue, d_nextQueue); } while(size > 0); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsed; cudaEventElapsedTime(&elapsed, start, stop); cudaCheck(cudaMemcpy(distance, d_distance, size_of_rowptr, cudaMemcpyDeviceToHost)); cudaCheck(cudaFree(d_row_ptr)); cudaCheck(cudaFree(d_col_ind)); cudaCheck(cudaFree(d_distance)); cudaCheck(cudaFree(d_queue)); cudaCheck(cudaFree(d_nextQueue)); cudaCheck(cudaFree(d_nextSize)); printf("GPU QBFS time: %f s\n", elapsed / 1000); } __global__ void cudatdwbfs(int *distance, unsigned int *row_ptr, int *col_ind, int nov, int level, int *mf) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if(tid < nov && distance[tid] == level) { for(int e = row_ptr[tid]; e < row_ptr[tid + 1]; e++) { int adj = col_ind[e]; if(distance[adj] < 0) { atomicAdd(mf, -distance[adj]); distance[adj] = level + 1; } } } } __global__ void cudatdqbfs(int *distance, unsigned int *row_ptr, int *col_ind, int *queue, int *nextQueue, int size, int *nextSize, int level, int *mf) { int index, u, v, tid = threadIdx.x + blockDim.x * blockIdx.x; if(tid < size) { u = queue[tid]; 
for(int e = row_ptr[u]; e < row_ptr[u + 1]; e++) { v = col_ind[e]; if (distance[v] < 0) { index = atomicAdd(nextSize, 1); atomicAdd(mf, -distance[v]); distance[v] = level + 1; nextQueue[index] = v; } } } } __global__ void cudabuwbfs(int *distance, unsigned int *row_ptr_inv, int *col_ind_inv, int nov, int level, int *mf) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if(tid < nov && distance[tid] < 0) { for(int e = row_ptr_inv[tid]; e < row_ptr_inv[tid + 1]; e++) { int adj = col_ind_inv[e]; if(distance[adj] == level) { atomicAdd(mf, -distance[tid]); distance[tid] = level + 1; break; } } } } __global__ void cudabuqbfs(int *distance, unsigned int *row_ptr_inv, int *col_ind_inv, int nov, int level, int *nextSize, int *mf) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if(tid < nov && distance[tid] < 0) { for(int e = row_ptr_inv[tid]; e < row_ptr_inv[tid + 1]; e++) { int adj = col_ind_inv[e]; if(distance[adj] == level) { atomicAdd(mf, -distance[tid]); atomicAdd(nextSize, 1); distance[tid] = level + 1; break; } } } } void hybrid(unsigned int *row_ptr, unsigned int *row_ptr_inv, int *col_ind, int *col_ind_inv, int *distance, int nov, int source, double alpha) { // int init_mf? 
int size_of_rowptr = (nov + 1) * sizeof(int); int size_of_colind = row_ptr[nov] * sizeof(int); int *improvement = new int; unsigned int *d_row_ptr, *d_row_ptr_inv; int *d_col_ind, *d_col_ind_inv, *d_distance, *d_mf; cudaCheck(cudaMalloc((void**) &d_row_ptr, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_row_ptr_inv, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_col_ind, size_of_colind)); cudaCheck(cudaMalloc((void**) &d_col_ind_inv, size_of_colind)); cudaCheck(cudaMalloc((void**) &d_distance, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_mf, sizeof(int))); cudaCheck(cudaMemcpy(d_distance, distance, size_of_rowptr, cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_row_ptr, row_ptr, size_of_rowptr, cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_row_ptr_inv, row_ptr_inv, size_of_rowptr, cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_col_ind, col_ind, size_of_colind, cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_col_ind_inv, col_ind_inv, size_of_colind, cudaMemcpyHostToDevice)); int srcNeigh = row_ptr[source + 1] - row_ptr[source]; int *srcArr = new int[srcNeigh]; int index = 0; for (int i = row_ptr[source]; i < row_ptr[source + 1]; i++) { if (distance[col_ind[i]] == 1) { srcArr[index++] = col_ind[i]; } } int *d_queue, *d_nextQueue, *d_nextSize; cudaCheck(cudaMalloc((void**) &d_queue, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_nextQueue, size_of_rowptr)); cudaCheck(cudaMalloc((void**) &d_nextSize, sizeof(int))); //cudaCheck(cudaMemcpy(d_queue, &source, sizeof(int), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_queue, srcArr, srcNeigh * sizeof(int), cudaMemcpyHostToDevice)); int mf = row_ptr[source + 1] - row_ptr[source]; // number of traversed edges int mu = row_ptr[nov]; // total number of edges int prev_mf = -1; int level = 1; int size = srcNeigh; int *nextSize = new int; *nextSize = 0; cudaEvent_t start; cudaEvent_t stop; cudaCheck(cudaEventCreate(&start)); cudaCheck(cudaEventCreate(&stop)); cudaCheck(cudaEventRecord(start, 0)); while 
(mf != prev_mf) { prev_mf = mf; if (mf > mu / alpha) { cudaCheck(cudaMemcpy(d_mf, &mf, sizeof(int), cudaMemcpyHostToDevice)); cudabuwbfs<<<(nov + 1023) / 1024, 1024>>>(d_distance, d_row_ptr_inv, d_col_ind_inv, nov, level, d_mf); cudaCheck(cudaMemcpy(&mf, d_mf, sizeof(int), cudaMemcpyDeviceToHost)); } else { cudaCheck(cudaMemcpy(d_mf, &mf, sizeof(int), cudaMemcpyHostToDevice)); cudatdwbfs<<<(nov + 1023) / 1024, 1024>>>(d_distance, d_row_ptr, d_col_ind, nov, level, d_mf); cudaCheck(cudaMemcpy(&mf, d_mf, sizeof(int), cudaMemcpyDeviceToHost)); } level++; } /*while (mf != prev_mf) { prev_mf = mf; if (mf > mu / alpha) { cudaCheck(cudaMemcpy(d_mf, &mf, sizeof(int), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_nextSize, nextSize, sizeof(int), cudaMemcpyHostToDevice)); cudabuqbfs<<<(nov + 1023) / 1024, 1024>>>(d_distance, d_row_ptr_inv, d_col_ind_inv, nov, level, d_nextSize, d_mf); cudaCheck(cudaMemcpy(&mf, d_mf, sizeof(int), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(nextSize, d_nextSize, sizeof(int), cudaMemcpyDeviceToHost)); } else { cudaCheck(cudaMemcpy(d_mf, &mf, sizeof(int), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_nextSize, nextSize, sizeof(int), cudaMemcpyHostToDevice)); cudatdqbfs<<<(size + 1023) / 1024, 1024>>>(d_distance, d_row_ptr, d_col_ind, d_queue, d_nextQueue, size, d_nextSize, level, d_mf); cudaCheck(cudaMemcpy(&mf, d_mf, sizeof(int), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(nextSize, d_nextSize, sizeof(int), cudaMemcpyDeviceToHost)); } level++; size = *nextSize; *nextSize = 0; swap(d_queue, d_nextQueue); }*/ cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsed; cudaEventElapsedTime(&elapsed, start, stop); cudaCheck(cudaMemcpy(distance, d_distance, size_of_rowptr, cudaMemcpyDeviceToHost)); cudaCheck(cudaFree(d_row_ptr)); cudaCheck(cudaFree(d_row_ptr_inv)); cudaCheck(cudaFree(d_col_ind)); cudaCheck(cudaFree(d_col_ind_inv)); cudaCheck(cudaFree(d_distance)); cudaCheck(cudaFree(d_mf)); cudaCheck(cudaFree(d_queue)); 
cudaCheck(cudaFree(d_nextQueue)); cudaCheck(cudaFree(d_nextSize)); printf("GPU Hybrid time: %f s\n", elapsed / 1000); }
21,909
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
//#include <common.h>

#define M 10          // number of columns in a row
#define NR_BLOCK 1024 // number of blocks used for the compute launch

/*
 * Five-point stencil over a single row.
 *
 * `a` packs three rows of M floats each: a[0..M-1] is the row above,
 * a[M..2M-1] is the row being updated, a[2M..3M-1] is the row below.
 * For each interior column idx (0 < idx < M) the output is
 *   b[idx] = 0.2 * (left + center + right + above + below).
 *
 * Launched as compute<<<NR_BLOCK, 1>>>, so i == blockIdx.x; with M == 10
 * only blocks whose index lands inside (0, M) do any work.
 *
 * NOTE(review): 0.2 is a double literal, so the sum is evaluated in
 * double precision — use 0.2f if single precision was intended.
 * NOTE(review): when idx == M-1 the "right" neighbour a[M+idx+1] is
 * a[2*M], i.e. the first element of the row below — confirm this
 * boundary behaviour is intended.
 */
__global__ void compute(const float * a, float * b)
{
    int i = blockIdx.x;
    int j;
    for (j = 0; j < M; j++) {
        if ((i + j * NR_BLOCK) > 0 && (i + j * NR_BLOCK) < M) {
            b[i + j * NR_BLOCK] = 0.2 * (a[M+((i+j*NR_BLOCK)-1)] + a[M+(i+j*NR_BLOCK)] + a[M+((i+j*NR_BLOCK)+1)] + a[(i+j*NR_BLOCK)] + a[2*M+(i+j*NR_BLOCK)]);
        }
    }
}

/* Per-call state for foo(); describes one PE's slice of the grid. */
struct params {
    float ** a;       // input rows (one pointer per row of M floats)
    float ** b;       // output rows
    float * c;        // halo row received from the PE above
    float * d;        // halo row received from the PE below
    float * c_a;      // device buffer for the 3-row stencil input (3*M floats)
    float * c_b;      // device buffer for the stencil output (M floats)
    int up, down, j;  // neighbour PE ids (-1 = none) and current row index
    int stop;         // number of rows owned by this PE
    int num_pes;      // total number of PEs (not referenced in foo)
    int mype;         // this PE's id; selects which GPU to use
};
typedef struct params params_t;

/*
 * Run one stencil step for row param->j on the GPU.
 *
 * Stages the row above, the row itself and the row below into c_a
 * (using the halo rows c/d at the slice edges), copies the current
 * output row into c_b, launches compute, and copies the result back
 * into b[j].
 *
 * NOTE(review): the edge tests look suspect — when j == 0 and
 * up == -1 the else-branch reads param->a[-1] (and symmetrically
 * param->a[stop] at the bottom when down == -1). Confirm callers can
 * never reach those combinations.
 */
void foo(params_t * param)
{
    int j = param->j;
    int up = param->up;
    int down = param->down;
    int num_pes = param->num_pes;  // kept for parity with struct; unused below
    int mype = param->mype;

    // Odd-ranked PEs use GPU 1, even-ranked PEs use GPU 0.
    if ((mype % 2) == 1) {
        cudaSetDevice(1);
    } else {
        cudaSetDevice(0);
    }

    // above: halo row from the upper neighbour, or the previous local row
    if (up != -1 && j == 0) {
        cudaMemcpy(param->c_a, param->c, M * sizeof(float), cudaMemcpyHostToDevice);
    } else {
        cudaMemcpy(param->c_a, param->a[j - 1], M * sizeof(float), cudaMemcpyHostToDevice);
    }
    // middle: the row being updated
    cudaMemcpy(&(param->c_a[M]), param->a[j], M * sizeof(float), cudaMemcpyHostToDevice);
    // below: halo row from the lower neighbour, or the next local row
    if (down != -1 && j == param->stop - 1) {
        cudaMemcpy(&(param->c_a[2 * M]), param->d, M * sizeof(float), cudaMemcpyHostToDevice);
    } else {
        cudaMemcpy(&(param->c_a[2 * M]), param->a[j + 1], M * sizeof(float), cudaMemcpyHostToDevice);
    }

    cudaMemcpy(param->c_b, param->b[j], M * sizeof(float), cudaMemcpyHostToDevice);
    compute<<<NR_BLOCK, 1>>>(param->c_a, param->c_b);
    cudaMemcpy(param->b[j], param->c_b, M * sizeof(float), cudaMemcpyDeviceToHost);
}
21,910
#include "includes.h"

/*
 * Transpose an nx-by-ny integer matrix, 4-way unrolled along x.
 *
 * Each thread handles four elements spaced blockDim.x apart: it reads
 * column-wise through `to` and writes row-wise through `ti`, so the
 * stores are coalesced while the loads are strided. gridDim.x must be
 * sized for nx / (4 * blockDim.x).
 */
__global__ void transposeUnroll4Col(int *in, int *out, const int nx, const int ny)
{
    // Base coordinates of this thread's first element.
    const unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x * 4;
    const unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;

    const unsigned int ti = iy * nx + ix; // row-major index (write side)
    const unsigned int to = ix * ny + iy; // column-major index (read side)

    // Guard on the last of the four elements; if it is in range, all are.
    if (ix + 3 * blockDim.x < nx && iy < ny)
    {
        for (unsigned int k = 0; k < 4; ++k)
        {
            out[ti + k * blockDim.x] = in[to + k * ny * blockDim.x];
        }
    }
}
21,911
#include "includes.h"

/*
 * Copy two consecutive doubles per thread from y into save.
 *
 * Thread t copies elements 2*t and 2*t+1. There is no bounds check, so
 * the launch configuration must supply exactly half the array length in
 * total threads — caller's responsibility.
 */
__global__ void copy_kernel(double *save, double *y)
{
    const int base = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
    save[base]     = y[base];
    save[base + 1] = y[base + 1];
}
21,912
#include <stdio.h>
#include <assert.h>
#include <stdlib.h> /* atoi, malloc, calloc, exit */
#include <string.h> /* memset */
#include <math.h>   /* fabs */

#define epsilon (float)1e-5
#define DATA double
#define THREADxBLOCKalongXorY 4

/*
 * CPU reference: P = M * N for square Width x Width matrices
 * (row-major). Used to validate the GPU result.
 */
void MatrixMulOnHost(DATA* M, DATA* N, DATA* P, int Width)
{
    for (int i = 0; i < Width; ++i) {
        for (int j = 0; j < Width; ++j) {
            double pvalue = 0;
            for (int k = 0; k < Width; ++k) {
                double a = M[i * Width + k];
                double b = N[k * Width + j];
                pvalue += a * b;
            }
            P[i * Width + j] = pvalue;
        }
    }
}

/*
 * One thread per output element: dP[iy][ix] = sum_k dM[iy][k]*dN[k][ix].
 * The guard handles grids that overshoot Width.
 */
__global__ void MatrixMulKernel(DATA* dM, DATA* dN, DATA* dP, int Width)
{
    DATA Pvalue = 0.0;
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = iy * Width + ix;

    if (ix < Width && iy < Width) {
        for (int k = 0; k < Width; ++k) {
            Pvalue += dM[iy * Width + k] * dN[k * Width + ix];
        }
        dP[idx] = Pvalue;
    }
}

/*
 * Allocate device buffers, copy M and N over, run MatrixMulKernel,
 * copy the product back into P, and report the kernel time (ms) in *et.
 */
void MatrixMulOnDevice(DATA* M, DATA* N, DATA* P, int Width, float *et)
{
    int size = Width * Width * sizeof(DATA);
    cudaEvent_t start, stop;
    DATA *dM, *dN, *dP;

    /* Round the grid up so every element of P is covered. */
    int gridside = Width / THREADxBLOCKalongXorY;
    if (gridside * THREADxBLOCKalongXorY < Width) {
        gridside = gridside + 1;
    }

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* Allocate and load M and N in GPU memory. */
    cudaMalloc(&dM, size);
    cudaMemcpy(dM, M, size, cudaMemcpyHostToDevice);
    cudaMalloc(&dN, size);
    cudaMemcpy(dN, N, size, cudaMemcpyHostToDevice);
    cudaMalloc(&dP, size);

    /* Execution configuration: square blocks, square grid. */
    dim3 dimGrid(gridside, gridside);
    dim3 dimBlock(THREADxBLOCKalongXorY, THREADxBLOCKalongXorY);

    cudaEventRecord(start, 0);
    printf("Num blocchi: %d -- Num Thread: %d\n", dimGrid.x, dimBlock.x);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(dM, dN, dP, Width);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(et, start, stop);

    /* Copy P back from GPU memory. */
    cudaMemcpy(P, dP, size, cudaMemcpyDeviceToHost);

    cudaFree(dM);
    cudaFree(dN);
    cudaFree(dP);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}

/*
 * Driver: builds two Width x Width matrices, multiplies them on host
 * and device, prints all three matrices, and compares the results with
 * a relative epsilon tolerance.
 */
int main(int argc, char** argv)
{
    int Width;
    float et;
    DATA *M, *N, *hP, *gP;

    if (argc < 2) {
        fprintf(stderr, "Usage: %s Width\n", argv[0]);
        exit(1);
    }
    Width = atoi(argv[1]);
    if (Width < 1) {
        fprintf(stderr, "Error Width=%d, must be > 0\n", Width);
        exit(1);
    }

    M  = (DATA *)malloc(Width * Width * sizeof(DATA));
    N  = (DATA *)malloc(Width * Width * sizeof(DATA));
    hP = (DATA *)malloc(Width * Width * sizeof(DATA));
    gP = (DATA *)malloc(Width * Width * sizeof(DATA));
    if (M == NULL)  { fprintf(stderr, "Could not get memory for M\n");  exit(1); }
    if (N == NULL)  { fprintf(stderr, "Could not get memory for N\n");  exit(1); }
    if (hP == NULL) { fprintf(stderr, "Could not get memory for hP\n"); exit(1); }
    if (gP == NULL) { fprintf(stderr, "Could not get memory for gP\n"); exit(1); }
    memset(gP, 0, Width * Width * sizeof(DATA));
    memset(hP, 0, Width * Width * sizeof(DATA));

    /* Deterministic fill: element (y,x) gets its linear index. */
    for (int y = 0; y < Width; y++) {
        printf("\n");
        for (int x = 0; x < Width; x++) {
            M[y * Width + x] = (DATA)(y * Width + x);
            N[y * Width + x] = (DATA)(y * Width + x);
        }
    }

    MatrixMulOnHost(M, N, hP, Width);
    MatrixMulOnDevice(M, N, gP, Width, &et);

    /* BUG FIX: DATA is double, so the original printf("%d ", ...) passed
     * a double where an int was expected — undefined behaviour and
     * garbage output. Print with %f instead. */
    printf("\n\nInput Matrix");
    for (int y = 0; y < Width; y++) {
        printf("\n");
        for (int x = 0; x < Width; x++) {
            printf("%f ", M[y * Width + x]);
        }
    }
    printf("\n\nplain C");
    for (int y = 0; y < Width; y++) {
        printf("\n");
        for (int x = 0; x < Width; x++) {
            printf("%f ", hP[y * Width + x]);
        }
    }
    printf("\n\nGPU C");
    for (int y = 0; y < Width; y++) {
        printf("\n");
        for (int x = 0; x < Width; x++) {
            printf("%f ", gP[y * Width + x]);
        }
    }

    /* Compare host and device products with a relative tolerance. */
    int errCnt = 0;
    for (int y = 0; y < Width; y++) {
        for (int x = 0; x < Width; x++) {
            DATA it = hP[y * Width + x];
            if (fabs(it - gP[y * Width + x]) > epsilon * it) errCnt++;
        }
    }

    if (errCnt == 0) {
        printf("\nTEST PASSED\n");
        printf("Kernel execution time=%f milliseconds\n", et);
    } else {
        printf("\n\nTEST FAILED: number of errors: %d\n", errCnt);
    }

    /* Release host buffers (the original leaked all four). */
    free(M);
    free(N);
    free(hP);
    free(gP);
    return 0;
}
21,913
//pass
//--blockDim=64 --gridDim=64 --no-inline
#include <cuda.h>
#include <assert.h>

#define N 2//64

// Every thread's x-index is below blockDim.x by construction; the
// device-side assert documents (and lets the verifier check) that
// invariant.
__global__ void foo(int* A) {
    //__assert(__all(threadIdx.x < blockDim.x));
    assert(threadIdx.x < blockDim.x);
}

int main() {
    const size_t bytes = N * sizeof(int);

    // Host staging buffer; the kernel never reads meaningful data.
    int *host = (int*)malloc(bytes);

    int *dev_a;
    cudaMalloc((void**)&dev_a, bytes);
    cudaMemcpy(dev_a, host, bytes, cudaMemcpyHostToDevice);

    foo<<<1, N>>>(dev_a);
    //ESBMC_verify_kernel(foo,1,N,dev_a);

    cudaFree(dev_a);
    free(host);
}
21,914
#include <stdio.h>
#include <stdlib.h>

#define SIZE 512

// This example is adapted from an example in Nvidia CUDA C Programming Guide 4.0

/*
 * Deliberate data-race demonstration.
 *
 * Thread tid reads input[tid] (ref1), writes input[tid + 1], then reads
 * input[tid] again (ref2). With the __syncthreads() barriers left
 * commented out, each read races with the write performed by thread
 * tid - 1, so ref1/ref2 may observe either the host-initialized 1 or
 * the neighbour's 2, and output[tid] = ref1 * ref2 is nondeterministic.
 * Re-enabling both barriers orders all first reads before all writes,
 * and all writes before all second reads, making the result
 * deterministic.
 */
__global__ void demo(int * input,int* output)
{
    int tid = threadIdx.x;
    int ref1 = input[tid];
    //These two syncthreads calls can make sure memory coherence.
    //__syncthreads();
    input[tid + 1] = 2;   // races with thread tid+1's reads of input[tid+1]
    //__syncthreads();
    int ref2 = input[tid];
    output[tid] = ref1 * ref2;
}

/*
 * Host driver: fills SIZE+1 ones into the device input (the extra slot
 * keeps input[tid + 1] in bounds for tid == SIZE-1), launches the racy
 * kernel on one block of SIZE threads, and prints the result. Every
 * CUDA call's status is checked explicitly.
 */
int main(int argc , char **argv){
    cudaError_t err;

    // Device input array: SIZE+1 ints (one halo slot at the end).
    int * input;
    err=cudaMalloc((void**)&input,(SIZE+1)*sizeof(int));
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    // Device output array: SIZE ints.
    int * output;
    err=cudaMalloc((void**)&output,SIZE*sizeof(int));
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    // Host staging buffer, initialized to all ones.
    int * temp;
    temp=(int *)malloc((SIZE+1)*sizeof(int));
    int i;
    for(i=0;i<(SIZE+1);i++){
        temp[i]=1;
    }

    err=cudaMemcpy( input, temp, sizeof(int)*(SIZE+1), cudaMemcpyHostToDevice);
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    // Single block of SIZE threads — the race is within one block.
    dim3 dimGrid(1,1);
    dim3 dimBlock(SIZE,1);
    demo<<<dimGrid,dimBlock>>>(input,output);

    err=cudaMemcpy( temp, output, sizeof(int)*(SIZE), cudaMemcpyDeviceToHost);
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    // Values may legitimately differ from run to run (see kernel doc).
    for(i=0;i<SIZE;i++){
        printf("%dth element is %d\n",i,temp[i]);
    }

    free(temp);
    err=cudaFree(input);
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    err=cudaFree(output);
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    return 0;
}
21,915
// 2D field accessors: all arrays are stored with x as the slow index,
// i.e. element (z, x) lives at linear offset (x)*(nz)+(z).
#define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_sxx(z,x) d_sxx[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)]
#define d_sxz(z,x) d_sxz[(x)*(nz)+(z)]
#define d_mem_dszz_dz(z,x) d_mem_dszz_dz[(x)*(nz)+(z)]
#define d_mem_dsxz_dx(z,x) d_mem_dsxz_dx[(x)*(nz)+(z)]
#define d_mem_dsxz_dz(z,x) d_mem_dsxz_dz[(x)*(nz)+(z)]
#define d_mem_dsxx_dx(z,x) d_mem_dsxx_dx[(x)*(nz)+(z)]
#define d_mem_dvz_dz(z,x) d_mem_dvz_dz[(x)*(nz)+(z)]
#define d_mem_dvz_dx(z,x) d_mem_dvz_dx[(x)*(nz)+(z)]
#define d_mem_dvx_dz(z,x) d_mem_dvx_dz[(x)*(nz)+(z)]
#define d_mem_dvx_dx(z,x) d_mem_dvx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_Mu(z,x) d_Mu[(x)*(nz)+(z)]
#define d_ave_Mu(z,x) d_ave_Mu[(x)*(nz)+(z)]
#define d_ave_Byc_a(z,x) d_ave_Byc_a[(x)*(nz)+(z)]
#define d_ave_Byc_b(z,x) d_ave_Byc_b[(x)*(nz)+(z)]

/*
 * Adjoint elastic stress update (2D staggered grid, 4th-order finite
 * differences with coefficients c1 = 9/8, c2 = 1/24) including CPML
 * memory variables (the d_mem_* arrays) and the d_K_* / d_a_* / d_b_*
 * CPML profile vectors.
 *
 * Thread mapping: gidz (depth, fast index) from blockIdx.x/threadIdx.x,
 * gidx (horizontal) from blockIdx.y/threadIdx.y. Only interior points
 * with 2 <= gidz <= nz-nPad-3 and 2 <= gidx <= nx-3 are updated;
 * all other threads return immediately.
 *
 * NOTE(review): d_Den and d_mem_dvz_dz(…)'s pairing with lambda/mu
 * follow the forward-modeling kernels of this project; d_Den is not
 * referenced in this kernel at all. Confirm signs and the
 * Byc_a/Byc_b pairings against the forward kernels — they are not
 * derivable from this file alone.
 */
__global__ void el_stress_adj(
    float *d_vz, float *d_vx, float *d_szz, float *d_sxx, float *d_sxz,
    float *d_mem_dszz_dz, float *d_mem_dsxz_dx, float *d_mem_dsxz_dz, float *d_mem_dsxx_dx,
    float *d_mem_dvz_dz, float *d_mem_dvz_dx, float *d_mem_dvx_dz, float *d_mem_dvx_dx,
    float *d_Lambda, float *d_Mu, float *d_ave_Mu, float *d_Den, float *d_ave_Byc_a, float *d_ave_Byc_b,
    float *d_K_z_half, float *d_a_z_half, float *d_b_z_half,
    float *d_K_x_half, float *d_a_x_half, float *d_b_x_half,
    float *d_K_z, float *d_a_z, float *d_b_z,
    float *d_K_x, float *d_a_x, float *d_b_x,
    int nz, int nx, float dt, float dz, float dx, int nPml, int nPad){

  // Grid coordinates of this thread's point.
  int gidz = blockIdx.x*blockDim.x + threadIdx.x;
  int gidx = blockIdx.y*blockDim.y + threadIdx.y;

  // Finite-difference derivative accumulators.
  float dphi_xz_x_dx = 0.0;
  float dvz_dx = 0.0;
  float dphi_xz_z_dz = 0.0;
  float dvx_dz = 0.0;
  float dphi_xx_x_dx = 0.0;
  float dvx_dx = 0.0;
  float dphi_zz_z_dz = 0.0;
  float dvz_dz = 0.0;

  // 4th-order staggered-grid FD coefficients.
  float c1 = 9.0/8.0;
  float c2 = 1.0/24.0;

  float lambda = d_Lambda(gidz,gidx);
  float mu = d_Mu(gidz,gidx);

  if (gidz>=2 && gidz<=nz-nPad-3 && gidx>=2 && gidx<=nx-3) {

    // Forward-staggered x/z derivatives of the sxz memory variables and
    // of the velocity fields at the sxz node.
    dphi_xz_x_dx = (-c1*(d_mem_dsxz_dx(gidz,gidx+1)-d_mem_dsxz_dx(gidz,gidx))
        + c2*(d_mem_dsxz_dx(gidz,gidx+2)-d_mem_dsxz_dx(gidz,gidx-1)))/dx;
    dvz_dx = (-c1*(d_vz(gidz,gidx+1)-d_vz(gidz,gidx)) + c2*(d_vz(gidz,gidx+2)-d_vz(gidz,gidx-1)))/dx;
    dphi_xz_z_dz = (-c1*(d_mem_dsxz_dz(gidz+1,gidx)-d_mem_dsxz_dz(gidz,gidx))
        + c2*(d_mem_dsxz_dz(gidz+2,gidx)-d_mem_dsxz_dz(gidz-1,gidx)))/dz;
    dvx_dz = (-c1*(d_vx(gidz+1,gidx)-d_vx(gidz,gidx)) + c2*(d_vx(gidz+2,gidx)-d_vx(gidz-1,gidx)))/dz;

    // update sxz
    d_sxz(gidz,gidx) += d_a_x[gidx]*dphi_xz_x_dx + dvz_dx/d_K_x[gidx]*d_ave_Byc_a(gidz,gidx)*dt
        + d_a_z[gidz]*dphi_xz_z_dz + dvx_dz/d_K_z[gidz]*d_ave_Byc_b(gidz,gidx)*dt;

    // update psi_zx and psi_xz (CPML memory variables for velocity
    // derivatives; the PML-region guards below were disabled upstream)
    // if(gidx<nPml || gidx>nx-nPml-1){
    d_mem_dvz_dx(gidz,gidx) = d_b_x_half[gidx]*d_mem_dvz_dx(gidz,gidx) + d_sxz(gidz,gidx)*d_ave_Mu(gidz,gidx)*dt;
    // }
    // if(gidz<nPml || gidz>nz-nPml-nPad-1){
    d_mem_dvx_dz(gidz,gidx) = d_b_z_half[gidz]*d_mem_dvx_dz(gidz,gidx) + d_sxz(gidz,gidx)*d_ave_Mu(gidz,gidx)*dt;
    // }

    // Backward-staggered derivatives for the normal-stress updates.
    dphi_xx_x_dx = (-c1*(d_mem_dsxx_dx(gidz,gidx)-d_mem_dsxx_dx(gidz,gidx-1))
        + c2*(d_mem_dsxx_dx(gidz,gidx+1)-d_mem_dsxx_dx(gidz,gidx-2)))/dx;
    dvx_dx = (-c1*(d_vx(gidz,gidx)-d_vx(gidz,gidx-1)) + c2*(d_vx(gidz,gidx+1)-d_vx(gidz,gidx-2)))/dx;
    dphi_zz_z_dz = (-c1*(d_mem_dszz_dz(gidz,gidx)-d_mem_dszz_dz(gidz-1,gidx))
        + c2*(d_mem_dszz_dz(gidz+1,gidx)-d_mem_dszz_dz(gidz-2,gidx)))/dz;
    dvz_dz = (-c1*(d_vz(gidz,gidx)-d_vz(gidz-1,gidx)) + c2*(d_vz(gidz+1,gidx)-d_vz(gidz-2,gidx)))/dz;

    // update sxx and szz
    d_sxx(gidz,gidx) += d_a_x_half[gidx]*dphi_xx_x_dx
        + d_ave_Byc_b(gidz, gidx)*dvx_dx/d_K_x_half[gidx]*dt;;
    d_szz(gidz,gidx) += d_a_z_half[gidz]*dphi_zz_z_dz
        + d_ave_Byc_a(gidz, gidx)*dvz_dz/d_K_z_half[gidz]*dt;

    // update psi_xx and psi_zz
    // if(gidx<nPml || gidx>nx-nPml-1){
    d_mem_dvx_dx(gidz, gidx) = d_b_x[gidx]*d_mem_dvx_dx(gidz, gidx) + lambda*d_szz(gidz, gidx)*dt
        + (lambda+2.0*mu)*d_sxx(gidz,gidx)*dt;
    // }
    // if(gidz<nPml || (gidz>nz-nPml-nPad-1)){
    d_mem_dvz_dz(gidz, gidx) = d_b_z[gidz]*d_mem_dvz_dz(gidz, gidx) + (lambda+2.0*mu)*d_szz(gidz, gidx)*dt
        + lambda*d_sxx(gidz,gidx)*dt;
    // }
  }
  else {
    return;
  }
}
21,916
#include <iostream>
#include <cmath>  // fabs for double-precision error accumulation

using namespace std;

/*
 * result[i] = a[i] + b[i] for i in [0, N).
 *
 * Grid-stride loop, so the kernel is correct for any grid/block
 * configuration (including a single block when debugging).
 */
__global__ void add_gpu(const int N, float *a, float *b, float *result) {
  int index = blockDim.x * blockIdx.x + threadIdx.x;
  const int stride = gridDim.x * blockDim.x;
  for (; index < N; index += stride) {
    result[index] = a[index] + b[index];
  }
}

/*
 * Adds two 2^20-element vectors (1.0 + 3.0) on the GPU and verifies
 * every element equals 4.0.
 */
int main() {
  int N = 1 << 20;
  cout << "Adding two " << N << " vectors" << std::endl;

  float *a, *b, *result;
  const int vectorMemSize = N * sizeof(float);

  // Managed allocations; prefetching below moves the pages to device 0
  // so kernel timing is not dominated by first-touch page migration.
  cudaMallocManaged(&a, vectorMemSize);
  cudaMallocManaged(&b, vectorMemSize);
  cudaMallocManaged(&result, vectorMemSize);

  for (int i = 0; i < N; i++) {
    a[i] = 1.0;
    b[i] = 3.0;
  }

  cudaMemPrefetchAsync(a, vectorMemSize, 0);
  cudaMemPrefetchAsync(b, vectorMemSize, 0);
  cudaMemPrefetchAsync(result, vectorMemSize, 0);

  const int blockSize = 128;
  const int numBlocks = N / blockSize + 1;
  add_gpu<<<numBlocks, blockSize>>>(N, a, b, result);

  cudaError err = cudaGetLastError();
  if ( cudaSuccess != err ) cout << cudaGetErrorString(err);

  // Wait for the kernel before the host touches `result`.
  cudaDeviceSynchronize();

  // Every element should be exactly 1.0f + 3.0f == 4.0f.
  // fabs (not abs) so the difference is not truncated to int on
  // toolchains where only the C abs(int) overload is visible.
  double errorSum = 0;
  for (int i = 0; i < N; i++) {
    errorSum += fabs(result[i] - 4.0);
  }
  cout << "Total error: " << errorSum << endl;

  // BUG FIX: the original called cudaFree(&a) etc., passing the address
  // of the host pointer variable instead of the device allocation —
  // the managed buffers were never freed and cudaFree failed.
  cudaFree(a);
  cudaFree(b);
  cudaFree(result);

  return 0;
}
21,917
#include "includes.h"

// ERROR CHECKING MACROS //////////////////////////////////////////////////////

/*
 * Evaluate a fitted quadratic regression surface on a regular grid of
 * query points and store the (clamped) payoffs into `regression`.
 *
 * One thread per grid point (noPoints = dimRes^noDims expected from the
 * indexing below — confirm with the caller). Each thread:
 *   1. decomposes its flat index idx into one index per dimension,
 *   2. maps those indices to coordinates via the per-control
 *      [xmins, xmaxes] ranges,
 *   3. evaluates constant + linear + quadratic/interaction terms from
 *      regCoeffs,
 *   4. clamps the value to <= 0 and writes it into the regression
 *      buffer at an offset determined by (year, control).
 *
 * NOTE(review): device-side malloc can return NULL when the device heap
 * is exhausted; the result is dereferenced unchecked here.
 * NOTE(review): pow() is double precision; casting its result to int
 * for index arithmetic can round down for large dimRes/noDims —
 * confirm inputs stay small enough, or use integer powers.
 * NOTE(review): nYears and noControls are only used via the output
 * offset computation at the end; nYears itself is unreferenced.
 */
__global__ void buildGlobalQuadReg(int noPoints, int noDims, int dimRes, int nYears, int noControls, int year, int control, float* regCoeffs, float* xmins, float* xmaxes, float* regression)
{
    // Global thread index
    int idx = blockIdx.x*blockDim.x + threadIdx.x;

    if (idx < noPoints) {
        // First deconstruct the index into the index along each dimension
        int *dimIdx;
        dimIdx = (int*)malloc(noDims*sizeof(int));

        int rem = idx;
        for (int ii = 0; ii < noDims; ii++) {
            // Mixed-radix decomposition with radix dimRes per dimension.
            int div = (int)(rem/pow(dimRes,noDims-ii-1));
            dimIdx[ii] = div;
            rem = rem - div*pow(dimRes,noDims-ii-1);
        }

        // Get the query point coordinates
        float *xQ;
        xQ = (float*)malloc(noDims*sizeof(float));

        for (int ii = 0; ii < noDims; ii++) {
            // Linear map from grid index to [xmin, xmax) for this control.
            xQ[ii] = ((float)dimIdx[ii])*(xmaxes[control*noDims + ii] - xmins[control*noDims + ii])/(float)dimRes + xmins[control*noDims + ii];
        }

        // Use the regression coefficients to compute the value at this query
        // point

        // Constant
        float computed = regCoeffs[0];

        // Linear Terms
        for (int ii = 0; ii < noDims; ii++) {
            computed += xQ[ii]*regCoeffs[ii+1];
        }

        // Quadratic and Interacting Terms
        int counter = 0;
        for (int ii = 0; ii < noDims; ii++) {
            for (int jj = ii; jj < noDims; jj++) {
                computed += xQ[ii]*xQ[jj]*regCoeffs[counter+1+noDims];
                counter++;
            }
        }

        // We know that a payoff cannot be greater than zero, so we adjust all
        // conditional payoffs greater than zero to be zero.
        if (computed >= 0.0) {
            computed = 0.0;
        }

        regression[year*noControls*(dimRes*noDims + (int)pow(dimRes,noDims)*2) + control*(dimRes*noDims + (int)pow(dimRes,noDims)*2) + dimRes* noDims + idx] = computed;

        // Free memory
        free(xQ);
        free(dimIdx);
    }
}
21,918
#include <stdio.h>
#include <cuda.h>

// Print the block and thread coordinates of every launched thread.
__global__ void MyKernel()
{
    printf("blockIdx.x=%u,ThreadIdx.x=%u\n", blockIdx.x, threadIdx.x);
}

// Launch MyKernel under one (blocks x threads) configuration and wait
// for it to finish, emitting the same host-side messages for each run.
static void runConfig(int blocks, int threads)
{
    MyKernel<<<blocks, threads>>>();
    printf("\n\n****Kernel (%dx%d) launched****\n\n", blocks, threads);
    cudaDeviceSynchronize();
    printf("\n****Kernel finished****\n\n");
}

// Demo: the same kernel launched with three different configurations.
int main()
{
    printf("Kernel (Blocks x Threads)\n");
    runConfig(1, 2);
    runConfig(2, 1);
    runConfig(2, 2);
    return 0;
}
21,919
#include "includes.h"

/*
 * Stream-compaction scatter step.
 *
 * For each selected element (predicate == 1), writes it to its
 * destination slot. d_scanArray holds an inclusive prefix sum of the
 * predicates, so scan[i]-1 is the 0-based output position.
 */
__global__ void scatter(int *d_array , int *d_predicateArray, int *d_scanArray,int *d_compactedArray, int d_numberOfElements)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx >= d_numberOfElements) return;  // thread beyond the data
    if (d_predicateArray[idx] != 1) return; // element not selected

    d_compactedArray[d_scanArray[idx] - 1] = d_array[idx];
}
21,920
#include <stdio.h>

// Single-thread scalar addition on the device: *c = *a + *b.
__global__ void add_kernel(int *a, int *b, int *c)
{
    *c = *a + *b;
}

// Adds 4 + 2 on the GPU and prints the result.
int main()
{
    const int size = sizeof(int);

    // Host operands and result.
    int a = 4;
    int b = 2;
    int c;

    // Device mirrors of a, b and c.
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Push operands to the device.
    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);

    // One block, one thread is enough for a scalar add.
    add_kernel<<<1, 1>>>(d_a, d_b, d_c);

    // The result lives on the device; pull it back and show it.
    cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
    printf("res: %d\n", c);

    // Release all device allocations.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
21,921
#include <stdio.h>

/*
 * Write six integers to the file named by argv[1], space-separated,
 * then report the file name on stdout.
 *
 * Returns 0 on success, 1 on missing argument or fopen failure.
 */
int main ( int argc, char *argv[ ] )
{
    int arr[ 6 ] = { 0, 1, 2, 3, 5, 8 };

    /* BUG FIX: the original indexed argv[1] without checking argc. */
    if ( argc < 2 ) {
        fprintf( stderr, "usage: %s <output-file>\n", argv[ 0 ] );
        return 1;
    }
    char *filename = argv[ 1 ];

    FILE *fp = fopen( filename, "w" );
    /* BUG FIX: a failed fopen returned NULL and was dereferenced. */
    if ( fp == NULL ) {
        perror( filename );
        return 1;
    }

    int x = 0;
    while( x < 6 ) {
        fprintf( fp, " %d ", arr[ x ] );
        x++;
    }

    /* BUG FIX: the stream was never flushed/closed. */
    fclose( fp );

    printf( "\n File %s was created and written with data \n\n", filename );
    return 0;
}
21,922
#include <stdlib.h>
#include <stdio.h>

// Merge-sort on the GPU followed by a host cleanup pass.
// (Device sort routines below were provided by our colleagues;
// comments translated to English.)

/* Merge list[start..mid) and list[mid..end) via sorted[], copy back. */
__device__ void mergeDevice(int *list, int *sorted, int start, int mid, int end)
{
    int ti = start, i = start, j = mid;
    while (i < mid || j < end) {
        if (j == end) sorted[ti] = list[i++];
        else if (i == mid) sorted[ti] = list[j++];
        else if (list[i] < list[j]) sorted[ti] = list[i++];
        else sorted[ti] = list[j++];
        ti++;
    }
    for (ti = start; ti < end; ti++) list[ti] = sorted[ti];
}

/* Host-side twin of mergeDevice, used by the final cleanup pass. */
void mergeHost(int *list, int *sorted, int start, int mid, int end)
{
    int ti = start, i = start, j = mid;
    while (i < mid || j < end) {
        if (j == end) sorted[ti] = list[i++];
        else if (i == mid) sorted[ti] = list[j++];
        else if (list[i] < list[j]) sorted[ti] = list[i++];
        else sorted[ti] = list[j++];
        ti++;
    }
    for (ti = start; ti < end; ti++) list[ti] = sorted[ti];
}

/* Recursive merge sort of list[start..end), run by a single thread. */
__device__ void mergeSortKernel(int *list, int *sorted, int start, int end)
{
    // Base case: fewer than two elements (also covers the situation of
    // more threads than elements).
    if (end - start < 2) return;
    mergeSortKernel(list, sorted, start, start + (end - start) / 2);
    mergeSortKernel(list, sorted, start + (end - start) / 2, end);
    mergeDevice(list, sorted, start, start + (end - start) / 2, end);
}

/* Each thread merges the two halves of its chunkSize-wide chunk. */
__global__ void callMerge(int *list, int *sorted, int chunkSize, int N)
{
    if (chunkSize >= N) return;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int start = tid * chunkSize;
    int end = start + chunkSize;
    if (end > N) {
        end = N;
    }
    mergeDevice(list, sorted, start, start + (end - start) / 2, end);
}

/* Each thread merge-sorts its own chunkSize-wide chunk. */
__global__ void callMergeSort(int *list, int *sorted, int chunkSize, int N)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int start = tid * chunkSize;
    int end = start + chunkSize;
    if (end > N) {
        end = N;
    }
    mergeSortKernel(list, sorted, start, end);
}

/* Print all elements of A, space-separated. */
void printArray(int A[], int size)
{
    int i;
    for (i = 0; i < size; i++)
        printf("%d ", A[i]);
    printf("\n");
}

/* Count positions where the array decreases, i.e. sorted runs - 1. */
int contarSeparacions(int A[], int size)
{
    int s = 0;
    int i;
    for (i = 0; i < size - 1; i++) {
        if (A[i] > A[i + 1]) {
            s++;
        }
    }
    return s;
}

/* Host cleanup: merge the first two half-chunks of the N/s-wide chunk. */
void sortBlocks(int *list, int *sorted, int N, int s)
{
    int chunkSize = N / s;
    int start = 0;
    int end = chunkSize;
    int mid = (start + end) / 2;
    mergeHost(list, sorted, start, mid, end);
}

/* Fill v with N pseudo-random values in [0, 131072). */
void InitV(int N, int *v)
{
    int i;
    for (i = 0; i < N; i++) v[i] = rand() % 131072;
}

int main()
{
    int *arr_h, *arrSorted_h, *arrSortedF_h;
    int *arr_d, *arrSorted_d, *arrSortedF_d;
    int chunkSize;
    unsigned int nBytes;
    unsigned int N;
    unsigned int nBlocks, nThreads;

    N = 131072;
    nThreads = 128;
    nBlocks = 32;
    chunkSize = N / (nThreads * nBlocks);
    nBytes = N * sizeof(int);

    cudaEvent_t start, stop;
    float timeTaken;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // BUG FIX: the original malloc'd all three host buffers and then
    // immediately overwrote the pointers with cudaMallocHost, leaking
    // the malloc'd blocks. Pinned allocation alone is sufficient.
    cudaMallocHost((int **) &arr_h, nBytes);
    cudaMallocHost((int **) &arrSorted_h, nBytes);
    cudaMallocHost((int **) &arrSortedF_h, nBytes);

    InitV(N, arr_h);

    cudaMalloc((int**)&arr_d, nBytes);
    cudaMalloc((int**)&arrSorted_d, nBytes);
    cudaMalloc((int**)&arrSortedF_d, nBytes);
    cudaMemcpy(arr_d, arr_h, nBytes, cudaMemcpyHostToDevice);

    printf("Given array is \n");
    printArray(arr_h, N);

    cudaEventRecord(start, 0);

    // Phase 1: sort each chunk independently on the device.
    callMergeSort<<<nBlocks, nThreads>>>(arr_d, arrSorted_d, chunkSize, N);

    // Phase 2: repeatedly merge neighbouring chunks, doubling the width.
    // NOTE(review): this pass reads arrSorted_d (the scratch buffer of
    // callMergeSort), not arr_d — verify the intended data flow.
    int auxChunkSize = chunkSize * 2;
    int auxBlock = nBlocks;
    int auxThread = nThreads / 2;
    while (auxChunkSize < N) {
        callMerge<<<auxBlock, auxThread>>>(arrSorted_d, arrSortedF_d, auxChunkSize, N);
        auxChunkSize = auxChunkSize * 2;
    }

    cudaMemcpy(arrSorted_h, arrSortedF_d, nBytes, cudaMemcpyDeviceToHost);

    // Phase 3: count remaining unsorted boundaries and do a host merge.
    int s = contarSeparacions(arrSorted_h, N);
    printf("\nSEPARACIONS: %d \n", s);
    sortBlocks(arrSorted_h, arrSortedF_h, N, s);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    cudaFree(arr_d);
    cudaFree(arrSorted_d);
    cudaFree(arrSortedF_d);

    cudaEventElapsedTime(&timeTaken, start, stop);

    printf("\nSorted array is \n");
    printArray(arrSortedF_h, N);
    printf("SEPARACIONS: %d\n", s);
    printf("nThreads: %d\n", nThreads);
    printf("nBlocks: %d\n", nBlocks);
    printf("Tiempo Total %4.6f ms\n", timeTaken);
    printf("Ancho de Banda %4.3f GB/s\n", (N * sizeof(int)) / (1000000 * timeTaken));

    // BUG FIX: release the pinned host buffers and the timing events,
    // which the original never freed.
    cudaFreeHost(arr_h);
    cudaFreeHost(arrSorted_h);
    cudaFreeHost(arrSortedF_h);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return 0;
}
21,923
/** Homework 3 question 2 code * * \file q2.cu * \author Utkarsh Vardan <uvardan@utexas.edu> * \author Jose Carlos Martinez Garcia-Vaso <carlosgvaso@utexas.edu> */ #include <cstdio> // standard I/O #include <string> // strings #include <fstream> // streams #include <vector> // std::vector #include <sstream> // std::stringstream #include <cuda_runtime.h> // CUDA functions // Globals #define DEBUG 0 //! Enable debug messages (0: no log output, 1: non-verbose logs, 2: verbose logs, 3: all logs) #define INPUT_FILE "inp.txt" #define OUTPUT_FILE_Q2A "q2a.txt" #define OUTPUT_FILE_Q2B "q2b.txt" #define OUTPUT_FILE_Q2C "q2c.txt" #define EXIT_OK 0 //! Exit code success #define EXIT_FATAL 1 //! Exit code unrecoverable error #define RANGES_NUM 10 //! Number of ranges (d_out size) /** Read input from file * * This function assumes the file contains a single line, as per the format in * the README.txt. * * \param filename Name of input file to read * \return Vector containing the input array in the file */ std::vector<int> read_input (std::string filename) { // Create a vector of integers to store the array in file std::vector<int> arr_in; // Create an input filestream std::ifstream fin(filename); // Make sure the file is open if (!fin.is_open()) { fprintf(stderr, "ERROR:read_input: Could not open file\n"); exit(EXIT_FATAL); } // Helper vars std::string line; int val; // Read the column names if (fin.good()) { // Extract the first line in the file std::getline(fin, line); // Create a stringstream from line std::stringstream ss(line); // Extract each integer while (ss >> val) { // Add the current integer to the vector arr_in.push_back(val); // If the next token is a comma, ignore it and move on if (ss.peek() == ',') ss.ignore(); } } // Close file fin.close(); return arr_in; } /** Write formated output to file * * This function uses the output format described in the README.txt file. 
* * \param filename Name of the output file * \param arr_out Vector to save to file */ void write_output (std::string filename, std::vector<int> arr_out) { // Create an output filestream object std::ofstream fout(filename); // Send arr_out vector entries to the stream for (int i = 0; i < arr_out.size(); ++i) { fout << arr_out.at(i); if(i != arr_out.size() - 1) fout << ", "; // No comma at end of line } //fout << "\n"; // inp.txt doesn't have a newline at the end of the file // Close the file fout.close(); } /** CUDA kernel for counting the entries in parallel using global memory * * \param d_out Pointer to output array in global memory * \param d_in Pointer to input array in global memory * \param n Size of the problem (input array size) */ __global__ void counterGlobalKernel(int *d_out, int *d_in, int n) { int tid=threadIdx.x; int blockid=blockIdx.x; int offset= blockid * blockDim.x; int gid=tid + offset; if (gid < n){ if (d_in[gid]>=0 && d_in[gid]<100){ atomicAdd(&d_out[0],1); } else if (d_in[gid]>=100 && d_in[gid]<200){ atomicAdd(&d_out[1],1); } else if (d_in[gid]>=200 && d_in[gid]<300){ atomicAdd(&d_out[2],1); } else if (d_in[gid]>=300 && d_in[gid]<400){ atomicAdd(&d_out[3],1); } else if (d_in[gid]>=400 && d_in[gid]<500){ atomicAdd(&d_out[4],1); } else if (d_in[gid]>=500 && d_in[gid]<600){ atomicAdd(&d_out[5],1); } else if (d_in[gid]>=600 && d_in[gid]<700){ atomicAdd(&d_out[6],1); } else if (d_in[gid]>=700 && d_in[gid]<800){ atomicAdd(&d_out[7],1); } else if (d_in[gid]>=800 && d_in[gid]<900){ atomicAdd(&d_out[8],1); } else if (d_in[gid]>=900 && d_in[gid]<1000){ atomicAdd(&d_out[9],1); } } __syncthreads(); #if DEBUG >= 2 if (gid == 0) { printf("\t\tResult: [ "); for (int i=0; i<RANGES_NUM; ++i) { if (i == RANGES_NUM-1) { printf("%d ]\n", d_out[i]); } else { printf("%d, ", d_out[i]); } } } #endif } /** CUDA kernel for counting the entries in parallel using shared memory * * \param d_out Pointer to output array in global memory * \param d_in Pointer to input array 
in global memory * \param n Size of the problem (input array size) */ __global__ void counterSharedKernel(int *d_out, int *d_in, int n) { /* d_shared is allocated in the kernel call 3rd arg: <<<blk, th, shmem>>>. * We allocated RANGES_NUM extra entries in the array to save each block's * results to shared memory at the end of the array, after the input arrays * entries. */ extern __shared__ int d_shared[]; int tid = threadIdx.x; int blockid = blockIdx.x; int offset = blockid * blockDim.x; int gid = tid + offset; // Load shared mem from shared mem if (gid < n) { d_shared[tid] = d_in[gid]; // Initialize the results part of the array to all zeroes if (tid == 0) { for (int i=n; i<n+RANGES_NUM; ++i) { d_shared[i] = 0; } } } __syncthreads(); if (gid < n){ if (d_shared[tid]>=0 && d_shared[tid]<100){ atomicAdd(&d_shared[n+0],1); } else if (d_shared[tid]>=100 && d_shared[tid]<200){ atomicAdd(&d_shared[n+1],1); } else if (d_shared[tid]>=200 && d_shared[tid]<300){ atomicAdd(&d_shared[n+2],1); } else if (d_shared[tid]>=300 && d_shared[tid]<400){ atomicAdd(&d_shared[n+3],1); } else if (d_shared[tid]>=400 && d_shared[tid]<500){ atomicAdd(&d_shared[n+4],1); } else if (d_shared[tid]>=500 && d_shared[tid]<600){ atomicAdd(&d_shared[n+5],1); } else if (d_shared[tid]>=600 && d_shared[tid]<700){ atomicAdd(&d_shared[n+6],1); } else if (d_shared[tid]>=700 && d_shared[tid]<800){ atomicAdd(&d_shared[n+7],1); } else if (d_shared[tid]>=800 && d_shared[tid]<900){ atomicAdd(&d_shared[n+8],1); } else if (d_shared[tid]>=900 && d_shared[tid]<1000){ atomicAdd(&d_shared[n+9],1); } } __syncthreads(); // Only 1 thread syncs data to global memory if (tid == 0) { for (int i=0; i<RANGES_NUM; ++i) { atomicAdd(&d_out[i], d_shared[n+i]); } #if DEBUG >= 2 printf("\t\tResult: Block %d: [ ", blockIdx.x); for (int i=0; i<RANGES_NUM; ++i) { if (i == RANGES_NUM-1) { printf("%d ]\n", d_shared[n+i]); } else { printf("%d, ", d_shared[n+i]); } } #endif } } /** CUDA kernel for the Hillis-Steele parallel scan sum * * 
\param d_in Pointer to input array in global memory * \param n Size of the problem (input array size) */ __global__ void parallelScanSumKernel(int *d_in, int n) { // Initialize global and thread IDs, and other variables int gid = threadIdx.x + blockDim.x * blockIdx.x; #if DEBUG >= 3 int tid = threadIdx.x; #endif int val = 0; // Ensure we only access available array entries if (gid < n) { #if DEBUG >= 3 if (tid == 0) { printf("\t\tIterations:\n\t\t\tBlock %d: d = %d: d_in = [ ", blockIdx.x, 0); for (int i=0; i<n; ++i) { if (i == n-1) { printf("%d ]\n", d_in[i]); } else { printf("%d, ", d_in[i]); } } } #endif for (int d=1; d<n; d=d*2) { if (gid >= d) { val = d_in[gid - d]; } __syncthreads(); if (gid >= d) { d_in[gid] = d_in[gid] + val; } __syncthreads(); #if DEBUG >= 3 if (tid == 0) { printf("\t\t\tBlock %d: d = %d: d_in = [ ", blockIdx.x, d); for (int i=0; i<n; ++i) { if (i == n-1) { printf("%d ]\n", d_in[i]); } else { printf("%d, ", d_in[i]); } } } #endif } #if DEBUG >= 2 if (gid == n-1) { printf("\t\tResult: [ "); for (int i=0; i<n; ++i) { if (i == n-1) { printf("%d ]\n", d_in[i]); } else { printf("%d, ", d_in[i]); } } } #endif } } /** Q2 a) Compute a counter array in global memory of GPU * * Create an array B of size 10 that keeps a count of the entries in each of the * ranges:[0, 99], [100, 199], [200, 299], ... , [900, 999]. For this part of * the problem, maintain array B in global memory of GPU. 
* * \param v_in Input array as a vector * \param dev_props CUDA device properties * \return Output vector with the contents of array B */ std::vector<int> q2a (const std::vector<int> &v_in, cudaDeviceProp *dev_props) { #if DEBUG printf("\tTransfering input array to GPU memory...\n"); #endif // Declare GPU memory pointers int *d_in, *d_out; // Allocate GPU memory int N = v_in.size(); // Problem size (input array size) int N_out = RANGES_NUM; // Output array size int d_in_size = N * sizeof(int); // Input array size in bytes int d_out_size = N_out * sizeof(int); // Output array size in bytes // Allocate output array, and initilize to all zeroes int *a_out; a_out = (int*) calloc(N_out, sizeof(int)); #if DEBUG printf("\tN (input array size): %d\n", N); #endif /* if (N > ((int)((*dev_props).maxThreadsPerBlock) * (int)((*dev_props).maxThreadsPerBlock))) { fprintf(stderr, "ERROR:q1a: problem size (input array size) is too large\n"); exit(EXIT_FATAL); } */ cudaMalloc((void **) &d_in, d_in_size); cudaMalloc((void **) &d_out, d_out_size); /* Transfer the input and output arrays to the GPU * Since the elements of a vector are stored contiguously in memory, we can * pass a pointer to the first element of the vector, and that will act as * if we passed a C array. 
*/ cudaMemcpy(d_in, &v_in[0], d_in_size, cudaMemcpyHostToDevice); cudaMemcpy(d_out, a_out, d_out_size, cudaMemcpyHostToDevice); #if DEBUG // Set up a timer to measure the elapsed time to find the min cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); printf("\tCounting entries in global memory...\n"); #endif // Calculate the number of blocks and threads to use int threads_per_block = (int)((*dev_props).maxThreadsPerBlock); // Max number of threads per block int blocks_per_grid = (N + (threads_per_block - 1)) / threads_per_block; #if DEBUG printf("\tThreads per block: %d\n", threads_per_block); printf("\tBlocks per grid: %d\n", blocks_per_grid); printf("\tRunning kernel...\n"); cudaEventRecord(start, 0); #endif // Launch the kernel to find min counterGlobalKernel<<<blocks_per_grid, threads_per_block>>> (d_out, d_in, N); // Make sure all the blocks finish executing cudaDeviceSynchronize(); cudaDeviceSynchronize(); #if DEBUG cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Calculate elapsed time, and print it float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("\tAverage time elapsed: %f\n", elapsedTime); #endif // Copy back the result from GPU cudaMemcpy(a_out, d_out, d_out_size, cudaMemcpyDeviceToHost); #if DEBUG >= 2 printf("\ta_out = [ "); for (int i=0; i<N_out; ++i) { if (i == N_out-1) { printf("%d ]\n", a_out[i]); } else { printf("%d, ", a_out[i]); } } #endif // Copy resulting array to output vector std::vector<int> v_out (a_out, a_out + N_out); #if DEBUG printf("\tOutput = [ "); for (int i=0; i<v_out.size(); ++i) { if (i == v_out.size()-1) { printf("%d ]\n", v_out[i]); } else { printf("%d, ", v_out[i]); } } #endif // Free GPU memory cudaFree(d_in); cudaFree(d_out); // Free host memory free(a_out); // Save output to file write_output(OUTPUT_FILE_Q2A, v_out); // Return the output vector to be used in Q2 c return v_out; } /** Q2 b) Compute a counter array in shared memory of GPU * * Repeat part (a) but first 
use the shared memory in a block for updating the
 * local copy of B in each block. Once every block is done, add all local copies
 * to get the global copy of B.
 *
 * \param v_in Input array as a vector
 * \param dev_props CUDA device properties */
void q2b (const std::vector<int> &v_in, cudaDeviceProp *dev_props) {
#if DEBUG
    printf("\tTransfering input array to GPU memory...\n");
#endif
    // Declare GPU memory pointers
    int *d_in, *d_out;

    // Allocate GPU memory
    int N = v_in.size();                  // Problem size (input array size)
    int N_out = RANGES_NUM;               // Output array size
    int d_in_size = N * sizeof(int);      // Input array size in bytes
    int d_out_size = N_out * sizeof(int); // Output array size in bytes

    // Allocate output array, and initilize to all zeroes
    int *a_out;
    a_out = (int*) calloc(N_out, sizeof(int));

#if DEBUG
    printf("\tN (input array size): %d\n", N);
#endif

    /*
    if (N > ((int)((*dev_props).maxThreadsPerBlock)
             * (int)((*dev_props).maxThreadsPerBlock))) {
        fprintf(stderr, "ERROR:q1a: problem size (input array size) is too large\n");
        exit(EXIT_FATAL);
    }
    */

    cudaMalloc((void **) &d_in, d_in_size);
    cudaMalloc((void **) &d_out, d_out_size);

    /* Transfer the input and output arrays to the GPU
     * Since the elements of a vector are stored contiguously in memory, we can
     * pass a pointer to the first element of the vector, and that will act as
     * if we passed a C array.
     */
    cudaMemcpy(d_in, &v_in[0], d_in_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_out, a_out, d_out_size, cudaMemcpyHostToDevice);

#if DEBUG
    // Set up a timer to measure the elapsed time to find the min
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    printf("\tCounting entries in shared memory...\n");
#endif

    // Calculate the number of blocks and threads to use
    int threads_per_block = (int)((*dev_props).maxThreadsPerBlock); // Max number of threads per block
    int blocks_per_grid = (N + (threads_per_block - 1)) / threads_per_block;

#if DEBUG
    printf("\tThreads per block: %d\n", threads_per_block);
    printf("\tBlocks per grid: %d\n", blocks_per_grid);
    printf("\tRunning kernel...\n");
    cudaEventRecord(start, 0);
#endif

    /* Launch the kernel.
     * The 3rd launch argument is the dynamic shared-memory size: input size
     * plus output size, because the kernel stages the input array and each
     * block's result histogram in shared memory (input first, results after).
     * NOTE(review): this request grows with N and will exceed the per-block
     * shared-memory limit for large inputs — confirm N stays small, and see
     * the error check below which now reports such a failed launch. */
    counterSharedKernel<<<blocks_per_grid, threads_per_block, d_in_size+d_out_size>>>
        (d_out, d_in, N);

    /* Catch launch-configuration errors (e.g. too much shared memory), then
     * wait for all the blocks to finish executing (the original synchronized
     * twice in a row, which is redundant, and never checked the launch). */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR:q2b: kernel launch failed: %s\n",
                cudaGetErrorString(err));
    }
    cudaDeviceSynchronize();

#if DEBUG
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Calculate elapsed time, and print it
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("\tAverage time elapsed: %f\n", elapsedTime);
#endif

    // Copy back the result from GPU
    cudaMemcpy(a_out, d_out, d_out_size, cudaMemcpyDeviceToHost);

#if DEBUG >= 2
    printf("\ta_out = [ ");
    for (int i=0; i<N_out; ++i) {
        if (i == N_out-1) {
            printf("%d ]\n", a_out[i]);
        } else {
            printf("%d, ", a_out[i]);
        }
    }
#endif

    // Copy resulting array to output vector
    std::vector<int> v_out (a_out, a_out + N_out);

#if DEBUG
    printf("\tOutput = [ ");
    for (int i=0; i<v_out.size(); ++i) {
        if (i == v_out.size()-1) {
            printf("%d ]\n", v_out[i]);
        } else {
            printf("%d, ", v_out[i]);
        }
    }
#endif

    // Free GPU memory
    cudaFree(d_in);
    cudaFree(d_out);

    // Free host memory
    free(a_out);

    // Save output to file
    write_output(OUTPUT_FILE_Q2B, v_out);
}

/** Q2 c) Compute a counter array in global memory of GPU
 *
 * Create an array of size 10 that uses B to compute C which keeps count of the
 * entries in each of the ranges:[0,99], [0,199], [0,299],. . . , [0, 999]. Note
 * that the ranges are different from the part (a). For this part of the
 * problem, you must not use array A.
* * \param v_in Input array as a vector * \param dev_props CUDA device properties */ void q2c (const std::vector<int> &v_in, cudaDeviceProp *dev_props) { #if DEBUG printf("\tTransfering input array to GPU memory...\n"); #endif // Declare GPU memory pointers int *d_in; // Allocate GPU memory int N = v_in.size(); // Problem size (input array size) int d_in_size = N * sizeof(int); // Input array size in bytes #if DEBUG printf("\tN (input array size): %d\n", N); #endif /* if (N > ((int)((*dev_props).maxThreadsPerBlock) * (int)((*dev_props).maxThreadsPerBlock))) { fprintf(stderr, "ERROR:q1a: problem size (input array size) is too large\n"); exit(EXIT_FATAL); } */ cudaMalloc((void **) &d_in, d_in_size); /* Transfer the input array to the GPU * Since the elements of a vector are stored contiguously in memory, we can * pass a pointer to the first element of the vector, and that will act as * if we passed a C array. */ cudaMemcpy(d_in, &v_in[0], d_in_size, cudaMemcpyHostToDevice); #if DEBUG // Set up a timer to measure the elapsed time to find the min cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); printf("\tCounting entries using result from Q2 a...\n"); #endif // Calculate the number of blocks and threads to use int threads_per_block = (int)((*dev_props).maxThreadsPerBlock); // Max number of threads per block int blocks_per_grid = (N + (threads_per_block - 1)) / threads_per_block; #if DEBUG printf("\tThreads per block: %d\n", threads_per_block); printf("\tBlocks per grid: %d\n", blocks_per_grid); printf("\tRunning kernel...\n"); cudaEventRecord(start, 0); #endif // Launch the kernel to find min parallelScanSumKernel<<<blocks_per_grid, threads_per_block>>> (d_in, N); // Make sure all the blocks finish executing cudaDeviceSynchronize(); cudaDeviceSynchronize(); #if DEBUG cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Calculate elapsed time, and print it float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("\tAverage 
time elapsed: %f\n", elapsedTime); #endif // Copy back the result from GPU int *a_out; a_out = (int*) malloc(d_in_size); cudaMemcpy(a_out, d_in, d_in_size, cudaMemcpyDeviceToHost); #if DEBUG >= 2 printf("\ta_out = [ "); for (int i=0; i<N; ++i) { if (i == N-1) { printf("%d ]\n", a_out[i]); } else { printf("%d, ", a_out[i]); } } #endif // Copy resulting array to output vector std::vector<int> v_out (a_out, a_out + N); #if DEBUG printf("\tOutput = [ "); for (int i=0; i<v_out.size(); ++i) { if (i == v_out.size()-1) { printf("%d ]\n", v_out[i]); } else { printf("%d, ", v_out[i]); } } #endif // Free GPU memory cudaFree(d_in); // Free host memory free(a_out); // Save output to file write_output(OUTPUT_FILE_Q2C, v_out); } /** Main * * Set up CUDA device, read input file, and run Q2a, Q2b and Q2c. * * \param argc Number of command-line arguments * \param argv Array of command-line arguments * \return Program return code */ int main (int argc, char **argv) { #if DEBUG std::printf("Executing main...\n"); #endif std::vector<int> v_in; std::vector<int> v_out; int device_count; int dev = 0; cudaDeviceProp dev_props; #if DEBUG printf("Detecting CUDA devices...\n"); #endif // Check there are CUDA devices available cudaGetDeviceCount(&device_count); if (device_count == 0) { fprintf(stderr, "ERROR:main: no CUDA devices found\n"); exit(EXIT_FATAL); } // Use device 0 cudaSetDevice(dev); if (cudaGetDeviceProperties(&dev_props, dev) == 0) { #if DEBUG printf("Using device:\n" "\tID: %d\n" "\tName: %s\n" "\tGlobal mem: %d B\n" "\tMax threads per block: %d\n" "\tCompute: v%d.%d\n" "\tClock: %d kHz\n", dev, dev_props.name, (int)dev_props.totalGlobalMem, (int)dev_props.maxThreadsPerBlock, (int)dev_props.major, (int)dev_props.minor, (int)dev_props.clockRate); #endif } else { fprintf(stderr, "ERROR:main: could not find CUDA device information\n"); exit(EXIT_FATAL); } #if DEBUG std::printf("Reading input array...\n"); #endif // Read input array v_in = read_input(INPUT_FILE); #if DEBUG >= 2 
printf("\tInput array = [ "); for (int i=0; i<v_in.size(); ++i) { if (i == v_in.size()-1) { printf("%d ]\n", v_in[i]); } else { printf("%d, ", v_in[i]); } } #endif #if DEBUG std::printf("Running Q2 a...\n"); #endif // Problem q2 a v_out = q2a(v_in, &dev_props); /* #if DEBUG std::printf("Reseting device...\n"); #endif cudaDeviceReset(); */ #if DEBUG std::printf("Running Q2 b...\n"); #endif // Problem q2 b q2b(v_in, &dev_props); /* #if DEBUG std::printf("Reseting device...\n"); #endif cudaDeviceReset(); */ #if DEBUG std::printf("Running Q2 c...\n"); #endif // Problem q2 c q2c(v_out, &dev_props); /* #if DEBUG std::printf("Reseting device...\n"); #endif cudaDeviceReset(); */ #if DEBUG std::printf("Done\n"); #endif return 0; }
21,924
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Threads per block for the 1D kernel launch
#define M 1024

/* Element-wise sum of two flattened matrices: C[i] = A[i] + B[i].
 * n is the total element count; the guard handles the ragged last block. */
__global__ void sumMatrixes(int* A, int* B, int* C, int n){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < n){
        C[index] = A[index] + B[index];
    }
}

int main(void){
    int *A, *B, *C;
    int i, j;

    // Input: number of rows and columns
    int linhas, colunas;
    scanf("%d", &linhas);
    scanf("%d", &colunas);
    int N = linhas * colunas;
    int size = N * sizeof(int);

    // Allocate memory on the CPU
    A = (int *)malloc(sizeof(int)*linhas*colunas);
    B = (int *)malloc(sizeof(int)*linhas*colunas);
    C = (int *)malloc(sizeof(int)*linhas*colunas);
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "host malloc failed\n");
        return 1;
    }

    // Initialize both inputs: A[i][j] = B[i][j] = i + j
    for(i = 0; i < linhas; i++){
        for(j = 0; j < colunas; j++){
            A[i*colunas+j] = B[i*colunas+j] = i+j;
        }
    }

    /* Allocate device memory for the matrices; on failure, report the error
     * on the standard output (the original comment promised this but never
     * actually checked the return codes). */
    int *_A, *_B, *_C;
    cudaError_t err;
    err = cudaMalloc((void**)&_A, size);
    if (err != cudaSuccess) { printf("cudaMalloc A: %s\n", cudaGetErrorString(err)); return 1; }
    err = cudaMalloc((void**)&_B, size);
    if (err != cudaSuccess) { printf("cudaMalloc B: %s\n", cudaGetErrorString(err)); return 1; }
    err = cudaMalloc((void**)&_C, size);
    if (err != cudaSuccess) { printf("cudaMalloc C: %s\n", cudaGetErrorString(err)); return 1; }

    cudaMemcpy(_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(_B, B, size, cudaMemcpyHostToDevice);

    // Ceil-division launch so every element is covered
    sumMatrixes <<< (N + M-1)/ M, M >>> (_A, _B, _C, N);

    // Blocking copy of the result back to the host (also waits for the kernel)
    cudaMemcpy(C, _C, size, cudaMemcpyDeviceToHost);

    // Keep this reduction on the CPU: sum all elements of C as a spot check
    long long int somador=0;
    for(i = 0; i < linhas; i++){
        for(j = 0; j < colunas; j++){
            somador+=C[i*colunas+j];
        }
    }

    printf("%lli\n", somador);

    free(A);
    free(B);
    free(C);
    cudaFree(_A);
    cudaFree(_B);
    cudaFree(_C);
}
21,925
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

/* Mandelbrot iteration-count kernel: one thread per pixel, 2D grid/block.
 * Writes the escape iteration (0..maxIter) into d_img, a tightly-packed
 * row-major resX x resY buffer.  resX/resY are passed in instead of the
 * previous hard-coded 1600/50/15 constants, so any resolution works, and
 * the bounds guard handles grids rounded up past the image edge. */
__global__ void mandelKernel(int* d_img, const int maxIter,
                             const float stepX, const float stepY,
                             const float lowerX, const float lowerY,
                             const int resX, const int resY) {
    // To avoid error caused by the floating number, use the following pseudo code
    //
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    int thisX = blockIdx.x * blockDim.x + threadIdx.x;
    int thisY = blockIdx.y * blockDim.y + threadIdx.y;
    if (thisX >= resX || thisY >= resY) return;

    const float x = lowerX + thisX * stepX;
    const float y = lowerY + thisY * stepY;

    // Standard escape-time iteration: z <- z^2 + c, bail out at |z|^2 > 4
    int i;
    float z_x = x;
    float z_y = y;
    for(i=0;i<maxIter;i++){
        if(z_x*z_x + z_y*z_y > 4.f) break;
        const float new_x = z_x*z_x - z_y*z_y;
        const float new_y = 2.f * z_x * z_y;
        z_x = x + new_x;
        z_y = y + new_y;
    }
    d_img[thisY * resX + thisX] = i;
}

/* Host front-end: allocates device memory, launches the kernel, and copies
 * the resX x resY iteration-count image into img.
 * Uses a plain linear cudaMalloc: the previous version allocated with
 * cudaMallocPitch but then indexed and copied the buffer as if it were
 * tightly packed, which corrupts the output whenever the returned pitch
 * differs from resX*sizeof(int).  It also never freed d_img. */
void hostFE (float upperX, float upperY, float lowerX, float lowerY,
             int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    size_t bytes = (size_t)resX * resY * sizeof(int);
    int *d_img;
    cudaMalloc(&d_img, bytes);

    // Ceil-division grid so any resX/resY is fully covered
    dim3 blockSize(32, 15);
    dim3 gridSize((resX + blockSize.x - 1) / blockSize.x,
                  (resY + blockSize.y - 1) / blockSize.y);
    mandelKernel<<<gridSize, blockSize>>>(d_img, maxIterations,
                                          stepX, stepY, lowerX, lowerY,
                                          resX, resY);
    cudaDeviceSynchronize();

    cudaMemcpy(img, d_img, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_img);
}
21,926
#include "bp.cuh"

/* Backward pass and SGD update for a small CNN (conv -> pool -> fc1 -> fc2).
 * All arrays referenced here (fc*_a, fc*_w, fc*_b, fc*_delta, fc*_dw,
 * fc*_db, conv_*, pool, pool_pos, input, C, alpha, minibatch) are
 * module-level state declared in bp.cuh — presumably activations, weights,
 * deltas and per-minibatch gradient accumulators; confirm there. */

// Output-layer deltas: delta2[i] = alpha * C[i] * a2[i]*(1-a2[i])
// (a*(1-a) is the sigmoid-derivative form — assumes sigmoid activations,
// confirm in the forward pass).  alpha is folded into the delta here, so
// assign_grads divides only by the minibatch size.  Accumulates into fc2_db.
void update_fc2_b()
{
    for(int i=0;i<FC2_SIZE;i++)
    {
        fc2_delta[i]=alpha*C[i]*(fc2_a[i]*(1.0-fc2_a[i]));
        fc2_db[i]+=fc2_delta[i];
    }
}

// Accumulate output-layer weight gradients: dW2[i][j] += delta2[i] * a1[j]
void update_fc2_w()
{
    for(int i=0;i<FC2_SIZE;i++)
        for(int j=0;j<FC1_SIZE;j++)
            fc2_dw[i][j]+=fc2_delta[i]*fc1_a[j];
}

// Hidden-layer deltas: backpropagate fc2 deltas through fc2_w, scale by the
// derivative term a1[i]*(1-a1[i]), and accumulate the bias gradients.
void update_fc1_b()
{
    for(int i=0;i<FC1_SIZE;i++)
    {
        float error=0;
        for(int j=0;j<FC2_SIZE;j++)
            error+=fc2_delta[j]*fc2_w[j][i];
        fc1_delta[i]=error*(fc1_a[i]*(1.0-fc1_a[i]));
        fc1_db[i]+=fc1_delta[i];
    }
}

// Accumulate fc1 weight gradients against the pooled feature maps
void update_fc1_w()
{
    for(int i=0;i<FC1_SIZE;i++)
        for(int j=0;j<CONV_W_NUM;j++)
            for(int k=0;k<POOL_SIZE;k++)
                for(int l=0;l<POOL_SIZE;l++)
                    fc1_dw[i][j][k][l]+=fc1_delta[i]*pool[j][k][l];
}

// Conv-layer deltas and bias gradients: backpropagate fc1 deltas through
// fc1_w into each pooled position, scale by pool[i][j][k]*(1-pool[...]),
// and sum the whole map into conv_sigma_delta[i] (one bias per filter).
void update_conv_b()
{
    for(int i=0;i<CONV_W_NUM;i++)
    {
        conv_sigma_delta[i]=0;
        for(int j=0;j<POOL_SIZE;j++)
            for(int k=0;k<POOL_SIZE;k++)
            {
                float error=0;
                conv_delta[i][j][k]=0;  // NOTE(review): dead store — overwritten below
                for(int l=0;l<FC1_SIZE;l++)
                    error+=fc1_delta[l]*fc1_w[l][i][j][k];
                conv_delta[i][j][k]=error*(pool[i][j][k]*(1.0-pool[i][j][k]));
                conv_sigma_delta[i]+=error*(pool[i][j][k]*(1.0-pool[i][j][k]));
            }
        conv_db[i]+=conv_sigma_delta[i];
    }
}

// Conv weight gradients: route each pooled delta back to the input pixel it
// came from.  pool_pos appears to encode the selected position within the
// 2x2 pooling window as (offset/2, offset%2) — presumably the argmax of a
// max-pool; confirm against the forward pass.
void update_conv_w()
{
    for(int i=0;i<CONV_W_NUM;i++)
        for(int j=0;j<CONV_W_SIZE;j++)
            for(int k=0;k<CONV_W_SIZE;k++)
            {
                float error=0;
                for(int m=0;m<POOL_SIZE;m++)
                    for(int n=0;n<POOL_SIZE;n++)
                    {
                        int x=pool_pos[i][m][n]/2;
                        int y=pool_pos[i][m][n]%2;
                        error+=conv_delta[i][m][n]*input[2*m+j+x][2*n+k+y];
                    }
                conv_dw[i][j][k]+=error;
            }
}

// Apply the accumulated gradients (averaged over `minibatch` samples) to
// every parameter, then clear the accumulators for the next minibatch.
void assign_grads()
{
    for(int i=0;i<FC2_SIZE;i++)
    {
        fc2_b[i]-=(fc2_db[i]/minibatch);
        fc2_db[i]=0;
    }
    for(int i=0;i<FC2_SIZE;i++)
        for(int j=0;j<FC1_SIZE;j++)
        {
            fc2_w[i][j]-=(fc2_dw[i][j]/minibatch);
            fc2_dw[i][j]=0;
        }
    for(int i=0;i<FC1_SIZE;i++)
    {
        fc1_b[i]-=(fc1_db[i]/minibatch);
        fc1_db[i]=0;
    }
    for(int i=0;i<FC1_SIZE;i++)
        for(int j=0;j<CONV_W_NUM;j++)
            for(int k=0;k<POOL_SIZE;k++)
                for(int l=0;l<POOL_SIZE;l++)
                {
                    fc1_w[i][j][k][l]-=(fc1_dw[i][j][k][l]/minibatch);
                    fc1_dw[i][j][k][l]=0;
                }
    for(int i=0;i<CONV_W_NUM;i++)
    {
        conv_b[i]-=(conv_db[i]/minibatch);
        conv_db[i]=0;
    }
    for(int i=0;i<CONV_W_NUM;i++)
        for(int j=0;j<CONV_W_SIZE;j++)
            for(int k=0;k<CONV_W_SIZE;k++)
            {
                conv_w[i][j][k]-=(conv_dw[i][j][k]/minibatch);
                conv_dw[i][j][k]=0;
            }
}
21,927
#include "includes.h"

/* Element-wise product stage of a dot product: sum[i] = vector1[i] * vector2[i]
 * for every i < size.  Despite the name, no reduction happens here — the
 * final summation over `sum` is expected to be done by the caller. */
__global__ void dotProduct_CUDA(double *sum, long size, double *vector1, double *vector2){
    // Flat global index: one thread per element
    long i = blockIdx.x*blockDim.x+threadIdx.x;
    if (i >= size) {
        return; // guard the ragged tail of the grid
    }
    sum[i] = vector1[i] * vector2[i];
}
21,928
#include "includes.h"

/* Running-average update of spike templates (Kilosort-style naming —
 * confirm against the caller).
 *
 * Launch layout implied by the indexing: one block per template
 * (bid = blockIdx.x), one thread per feature (tid = threadIdx.x, and
 * presumably blockDim.x == Nfeatures — TODO confirm at the launch site).
 *
 * For every spike `ind` assigned to this block's template (id[ind] == bid):
 *  - WU[tid + ioff[ind] + NfeatW*bid] is updated as an exponential moving
 *    average with decay pm toward the spike's feature value
 *    uproj[tid + Nfeatures*ind];
 *  - cf[ind + t*Nspikes] is filled with cmax for the Nnearest templates
 *    listed in iList for this template.  NOTE(review): this value does not
 *    depend on tid, so every thread in the block writes the same cf entries
 *    redundantly.
 *
 * Scalar parameters are packed in Params:
 *   [0]=Nspikes, [1]=Nfeatures, [3]=pm, [4]=NfeatW, [6]=Nnearest. */
__global__ void average_snips(const double *Params, const int *ioff, const int *id, const float *uproj, const float *cmax, const int *iList, float *cf, float *WU){
    int tid, bid, ind, Nspikes, Nfeatures, NfeatW, Nnearest, t;
    float xsum = 0.0f, pm;

    // Unpack scalar parameters from the Params vector
    Nspikes = (int) Params[0];
    Nfeatures = (int) Params[1];
    pm = (float) Params[3];
    NfeatW = (int) Params[4];
    Nnearest = (int) Params[6];

    tid = threadIdx.x;
    bid = blockIdx.x;

    // Scan all spikes; process only those assigned to this block's template
    for(ind=0; ind<Nspikes;ind++)
        if (id[ind]==bid){
            // EMA update of this template's feature tid at offset ioff[ind]
            xsum = uproj[tid + Nfeatures * ind];
            WU[tid + ioff[ind] + NfeatW * bid] = pm * WU[tid + ioff[ind] + NfeatW * bid] + (1-pm) * xsum;

            // go through the top 10 nearest filters and match them
            for (t=0;t<Nnearest;t++)
                cf[ind + t*Nspikes] = cmax[ind + Nspikes * iList[t + Nnearest*bid]];
        }
}
21,929
/*
 * @author Connie Shi
 * Lab 3: Write a reduction program in CUDA that finds the maximum
 *        of an array of M integers.
 * Part 2: Write a CUDA version that DOES take thread divergence
 *         into account. Uses sequential addressing.
 *
 * Should be run on cuda1 machine with 1024 max threads per block.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

#define THREADS_PER_BLOCK 1024
#define WARP 32

/* Function Declarations */
void generate_random(int random[], int num_elements);
__global__ void max_in_blocks(int random[], int num_elements);
__device__ void sequential(int random[], int num_elements);

/* Fills random[] with num_elements pseudo-random values.
 * NOTE(review): the expression below yields values in [0, 100000]
 * (0 is possible when rand() returns 0), not "1 to 100000" as the
 * original comment claimed.  Reseeding on every call also means two
 * calls within the same second produce identical data. */
void generate_random(int random[], int num_elements)
{
    int i;
    time_t t;
    srand((unsigned)time(&t)); //randomizes seed

    for (i = 0; i < num_elements; i++) {
        random[i] = (int)(((double)rand()/RAND_MAX)*100000);
    }
}

/* global function called from host and executed on kernel
 * Uses a tree-like structure to do parallel max reduction.
 * DOES avoid branch divergence. Uses coalescing.
*/ __global__ void max_in_blocks(int random[], int num_elements) { unsigned int tid = threadIdx.x; unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; int stride; // Stop when warp (size 32) will have branch divergence for (stride = blockDim.x/2; stride >= 32; stride >>= 1) { if (tid < stride) { if (random[index] < random[index + stride]) { random[index] = random[index + stride]; } } __syncthreads(); } __syncthreads(); // The max is in the first 32 positions // Sequential search 32 elements is very fast if (tid == 0) { sequential(random, num_elements); } } /* Sequential searches through the first 32 positions of the block * to prevent further divvying up of the warp into different branches */ __device__ void sequential(int random[], int num_elements) { int i; int max = 0; int tid = threadIdx.x + blockDim.x * blockIdx.x; for (i = tid; i < tid + WARP && i < num_elements; i++) { if (max < random[i]) { max = random[i]; } } // Put in block index position random[blockIdx.x] = max; } /**************************************************************/ int main(int argc, char*argv[]) { int* h_random; int* d_random; int i; int largest = 0; clock_t start, end; if (argc != 2) { printf("Invalid number of commands: usage ./cudadiv M\n"); exit(1); } // Generate array of random elements int num_elements = atoi(argv[1]); h_random = (int*)malloc(sizeof(int) * num_elements); generate_random(h_random, num_elements); // Work in finding max starts start = clock(); // Calculation for grid dimensions to multiple of warp int leftover = num_elements % WARP; int d_elements = num_elements - leftover; int n_blocks = (int)ceil((double)d_elements/THREADS_PER_BLOCK); int n_threads = (d_elements > THREADS_PER_BLOCK) ? 
THREADS_PER_BLOCK : d_elements; // Allocate space on device and copy over elements cudaError_t err = cudaMalloc((void**)&d_random, sizeof(int) * d_elements); if (err != cudaSuccess) { printf("cudaMalloc failure\n"); } err = cudaMemcpy(d_random, h_random, sizeof(int) * d_elements, cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("cudaMemcpy failure\n"); } // Execute kernel max_in_blocks<<<n_blocks, n_threads>>>(d_random, d_elements); // While kernel is executing, find the max in leftover elements for (i = d_elements; i < num_elements; i++) { if (largest < h_random[i]) { largest = h_random[i]; } } // Retrieve reduction results, only need n_blocks elements back cudaMemcpy(h_random, d_random, sizeof(int) * n_blocks, cudaMemcpyDeviceToHost); // Check through n_blocks elements for the max for (i = 0; i < n_blocks; i ++) { if (largest < h_random[i]) { largest = h_random[i]; } } end = clock(); printf("Time to find max %f\n", (double)(end-start)/CLOCKS_PER_SEC); printf("Largest: %d\n", largest); // Clean up resources cudaFree(d_random); free(h_random); }
21,930
// To compile: nvcc CPUAndGPUVectorAdditionClass.cu -o temp2
// To run: ./temp2

#include <sys/time.h>
#include <stdio.h>

/* Element-wise vector add: C[i] = A[i] + B[i] for i < n.
 * The n guard is required because the grid is rounded up past the array
 * end; the original unguarded kernel wrote out of bounds in that case. */
__global__ void Addition(unsigned char *A, unsigned char *B, unsigned char *C,
                         long unsigned int n)
{
    unsigned long id = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (id < n) {
        C[id] = A[id] + B[id];
    }
}

int main()
{
    int dev_cnt;
    long unsigned int max_thread_cnt, threads_per_block;
    long unsigned int input_cnt, id, sum;
    unsigned char *A_CPU, *B_CPU, *C_CPU; //Pointers for memory on the Host
    unsigned char *A_GPU, *B_GPU, *C_GPU; //Pointers for memory on the Device
    float time;
    dim3 dimBlock; //This variable will hold the Dimensions of your block
    dim3 dimGrid;  //This variable will hold the Dimensions of your grid
    timeval start, end;
    cudaDeviceProp prop;

    // Sum the theoretical thread capacity over all devices; this bounds
    // the largest array a single unguarded launch could cover
    cudaGetDeviceCount(&dev_cnt);
    max_thread_cnt = 0;
    for (int i=0; i<dev_cnt; i++){
        cudaGetDeviceProperties(&prop, i);
        threads_per_block = prop.maxThreadsPerBlock;
        printf("Threads per block for device %d: %lu\n", i, threads_per_block);
        long unsigned int blocks = prop.maxGridSize[0];
        for(int j=1; j<3; j++)
            blocks = (prop.maxGridSize[j] > blocks) ? prop.maxGridSize[j] : blocks;
        printf("Blocks on device %d: %lu\n", i, blocks);
        max_thread_cnt += threads_per_block*blocks;
    }
    printf("\n");

    /* Prompt until a valid size is entered.  The original looped on
     * `scanf(...) > max_thread_cnt`, which compared scanf's RETURN VALUE
     * (number of items converted) against the limit, so the entered size
     * was never actually validated. */
    do {
        printf("Size of array (< %lu)? \n", max_thread_cnt);
        if (scanf("%lu", &input_cnt) != 1) {
            fprintf(stderr, "Invalid input\n");
            return 1;
        }
    } while (input_cnt == 0 || input_cnt >= max_thread_cnt);

    //Threads in a block
    dimBlock.x = (input_cnt < 1024) ? input_cnt : 1024;
    dimBlock.y = 1;
    dimBlock.z = 1;

    /* Blocks in a grid — divide by the actual block width.  The original
     * divided by the LAST device's maxThreadsPerBlock, which need not match
     * dimBlock.x and under-provisioned the grid when it was larger. */
    dimGrid.x = 1 + (input_cnt - 1) / dimBlock.x;
    dimGrid.y = 1;
    dimGrid.z = 1;

    //Allocate Host (CPU) Memory
    A_CPU = (unsigned char*)malloc(input_cnt*sizeof(unsigned char));
    B_CPU = (unsigned char*)malloc(input_cnt*sizeof(unsigned char));
    C_CPU = (unsigned char*)malloc(input_cnt*sizeof(unsigned char));

    //Allocate Device (GPU) Memory
    cudaMalloc(&A_GPU,input_cnt*sizeof(unsigned char));
    cudaMalloc(&B_GPU,input_cnt*sizeof(unsigned char));
    cudaMalloc(&C_GPU,input_cnt*sizeof(unsigned char));

    //Loads values into vectors that we will add.
    for(id = 0; id < input_cnt; id++){
        A_CPU[id] = 1;
        B_CPU[id] = 2;
    }

    //********************** GPU addition start ****************************************
    //Starting a timer
    gettimeofday(&start, NULL);

    //Copying vectors A_CPU and B_CPU that were loaded on the CPU up to the GPU
    cudaMemcpyAsync(A_GPU, A_CPU, input_cnt*sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(B_GPU, B_CPU, input_cnt*sizeof(unsigned char), cudaMemcpyHostToDevice);

    Addition<<<dimGrid, dimBlock>>>(A_GPU, B_GPU, C_GPU, input_cnt);

    //Copy C_GPU that was calculated on the GPU down to the CPU
    cudaMemcpyAsync(C_CPU, C_GPU, input_cnt*sizeof(unsigned char), cudaMemcpyDeviceToHost);

    /* The async copies may still be in flight: wait before stopping the
     * timer and before reading C_CPU (the original read it unsynced). */
    cudaDeviceSynchronize();

    //Stopping the timer
    gettimeofday(&end, NULL);
    //********************** GPU addition finish ****************************************

    //Calculating the total time used in the addition on the GPU and converting it
    //to milliseconds and printing it to the screen.
    time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec);
    printf("\n------ GPU Results ------\n");
    printf("GPU Time in milliseconds= %.15f\n", (time/1000.0));

    //Summing up the vector C and printing it so we can have a spot check for
    //the correctness of the GPU addition.
    sum = 0;
    for(id = 0; id < input_cnt; id++)
        sum += C_CPU[id];
    printf("Sum of C_CPU from GPU addition= %lu\n", sum);

    //Your done so cleanup your mess.
    free(A_CPU);
    free(B_CPU);
    free(C_CPU);
    cudaFree(A_GPU);
    cudaFree(B_GPU);
    cudaFree(C_GPU);

    return(0);
}
21,931
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

/* Device-side trial-division primality test for odd x >= 3. */
__device__ int isPrimeGPU(long x)
{
    long long i;
    for (i = 2; i * i < x + 1; i++) {
        if (x % i == 0) {
            return 0;
        }
    }
    return 1;
}

/* Host-side trial-division primality test.
 * The sqrt bound is hoisted out of the loop (the original recomputed
 * sqrt(x) on every iteration). */
__host__ int isPrime(long x)
{
    long i;
    double limit = sqrt((double)x) + 1;
    for (i = 2; i < limit; i++) {
        if (x % i == 0) {
            return 0;
        }
    }
    return 1;
}

/* Marks primality of every number below n in c[]: c[k] = 1 iff k is prime.
 * Thread id handles one even number (2*id, always composite for id > 1)
 * and one odd number (2*id - 1, tested by trial division). */
__global__ void primeFind(int* c, long n)
{
    // Get our global thread ID
    int id = blockIdx.x * blockDim.x + threadIdx.x;

    /* Only one thread initializes the first three entries (0 and 1 are not
     * prime; 2 is).  The original had EVERY thread write c[0..2]
     * unconditionally, which was redundant and out of bounds for n < 3. */
    if (id == 0) {
        if (n > 0) c[0] = 0;
        if (n > 1) c[1] = 0;
        if (n > 2) c[2] = 1;
    }

    long num = (id * 2)-1;
    if (id < ((n/2)+1) && 1 < id) { //global threads 0 and 1 are handled above
        if (id * 2 < n) { //even numbers > 2 are composite
            c[id * 2] = 0;
        }
        if (num < n) { //odd numbers: trial division
            c[num] = isPrimeGPU(num);
        }
    }
}

int main(int argc, const char* argv[])
{
    if (argc < 3){
        printf("Usage: prime upbound\n");
        exit(-1);
    }

    // Size of vectors
    long n = atoi(argv[1]);
    printf("n = %ld \n", n);
    if(n <= 0){
        printf("Usage: prime upbound, you input invalid upbound number!\n");
        exit(-1);
    }
    int blockSize = atoi(argv[2]);
    printf("block size = %d \n", blockSize);

    // Host output
    int* cpuOutput;
    //Device output vector
    int* gpuOutput;

    // Size, in bytes, of each output
    size_t bytes = (unsigned long long)n * sizeof(int);

    // Allocate memory for each vector on host
    cpuOutput = (int*)malloc(bytes); //pc results
    gpuOutput = (int*)malloc(bytes); //gpu results

    //initalize
    for (long j = 0; j < n; j++) {
        cpuOutput[j] = 0;
        gpuOutput[j] = 0;
    }

    clock_t cStart = clock();
    double cpuStart = (double) cStart/CLOCKS_PER_SEC;

    ///////////////////////////////////////////////////////////////////////////////////
    //do it on cpu — guarded for small n (the original wrote cpuOutput[2]
    //even when n < 3)
    if (n > 0) cpuOutput[0] = (int)0;
    if (n > 1) cpuOutput[1] = (int)0;
    if (n > 2) cpuOutput[2] = (int)1;
    for (long i = 2; i < (n/2)+1; i++) {
        long num = (i * 2) - 1;
        if (i * 2 < n) {
            cpuOutput[i * 2] = 0;
        }
        if (num < n) {
            cpuOutput[num] = isPrime(num);
        }
    }
    clock_t cEnd = clock();
    double cpuEnd = (double)cEnd/CLOCKS_PER_SEC;

    //sum up pc result of # of primes
    int sum = 0;
    for (int i = 0; i < n; i++) {
        sum += (int)cpuOutput[i];
    }
    printf("CPU final result: %d\n", sum);

    double cpuTotal = cpuEnd - cpuStart;
    printf("CPU took %lf seconds to find primes numbers up to %ld\n", cpuTotal, n);

    ////////////////////////////////////////////////////////////////////////
    //do it on gpu
    clock_t gStart = clock();
    double gpuStart = (double)gStart / CLOCKS_PER_SEC;

    //Device output vector
    int* d_output;

    // Allocate memory for each vector on GPU
    cudaMalloc(&d_output, bytes);

    // Copy host vector to device so untouched entries are defined zeros
    cudaMemcpy(d_output, gpuOutput, bytes, cudaMemcpyHostToDevice);

    int gridSize; // Number of thread blocks in grid
    gridSize = (int)ceil((double)((double)((n+1)/2)/blockSize));

    primeFind<<<gridSize, blockSize>>>(d_output, n);

    // Copy array back to host (blocking: also waits for the kernel)
    cudaMemcpy(gpuOutput, d_output, bytes, cudaMemcpyDeviceToHost);

    clock_t gEnd = clock();
    double gpuEnd = (double)gEnd / CLOCKS_PER_SEC;

    // Sum up the GPU result
    sum = 0;
    for (long i = 2; i < n; i++) {
        sum += (int)gpuOutput[i];
    }
    printf("GPU final result: %d\n", sum);

    long double gpuTotal = gpuEnd - gpuStart;
    printf("GPU took %Lf seconds to find primes numbers up to %ld\n", gpuTotal, n);
    printf("GPU speeds up the process %Lf times.\n", cpuTotal / gpuTotal);

    // Release device memory
    cudaFree(d_output);

    // Release host memory
    free(cpuOutput);
    free(gpuOutput);

    return 0;
}
21,932
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <chrono> #define BLOCKS 1024 * 1024 #define THREADS 256 __global__ void FindKey(uint64_t aPlainText, uint64_t aKeyMax, uint64_t aCipherExpected, uint64_t* aResult) { int index = blockIdx.x * THREADS + threadIdx.x; int stride = BLOCKS * THREADS; if (*aResult) { return; } for (uint64_t key = index; key < aKeyMax; key += stride) { if (*aResult) { return; } uint64_t aWord = aPlainText; uint64_t aKey = key; for (char roundIndex = 0; roundIndex < 20; roundIndex++) { aWord ^= aKey; uint64_t sbox[] = { 0x2, 0x4, 0x5, 0x6, 0x1, 0xA, 0xF, 0x3, 0xB, 0xE, 0x0, 0x7, 0x9, 0x8, 0xC, 0xD }; uint64_t wordNew = 0; for (char nibbleIndex = 0; nibbleIndex < 16; nibbleIndex++) { char nibble = (aWord >> (nibbleIndex * 4)) & 0xF; wordNew |= sbox[nibble] << (nibbleIndex * 4); } aWord = wordNew; aWord = ((aWord << 15) | (aWord >> (64 - 15))) ^ ((aWord << 32) | (aWord >> (64 - 32))) ^ aWord; aKey = ((aKey << 15) | (aKey >> (64 - 15))) ^ ((aKey << 32) | (aKey >> (64 - 32))) ^ aKey ^ 0x3; } if (aWord == aCipherExpected) { *aResult = key; return; } } } int main() { // Assignment to find the key for // 0 - 20 bit key // 1 - 32 bit key // 2 - 44 bit key, cunldn't find :( char testCase = 2; uint64_t* resultHost; resultHost = (uint64_t*)malloc(sizeof(uint64_t)); *resultHost = false; uint64_t* resultDevice; cudaMalloc((void**)&resultDevice, sizeof(uint64_t)); cudaMemcpy(resultDevice, resultHost, sizeof(uint64_t), cudaMemcpyHostToDevice); // Find the key uint64_t plainTextVector[] = { 0x0441E17A4283531C, 0x2E01B3D043EFA867, 0x176BF4819739A044 }; uint64_t keyMaxVector[] = { 0x0000000000100000, 0x0000000100000000, 0x0000100000000000 }; uint64_t cipherExpectedVector[] = { 0x4546788A5ADF2106, 0x839C3F5FD7CFA5E5, 0x3422F9DFE688E023 }; auto start = std::chrono::high_resolution_clock::now(); FindKey<<<BLOCKS, THREADS>>>(plainTextVector[testCase], keyMaxVector[testCase], 
cipherExpectedVector[testCase], resultDevice); cudaMemcpy(resultHost, resultDevice, sizeof(uint64_t), cudaMemcpyDeviceToHost); std::cout << "Key: " << std::hex << *resultHost << std::endl; std::cout << "RunTime: " << std::dec << std::chrono::duration_cast<std::chrono::seconds>(std::chrono::high_resolution_clock::now() - start).count() << "[seconds]\n"; return 0; }
21,933
#include <iostream>
#include <math.h>

// 3-D coordinate within an n x n x n cube.
struct index {
    int x;
    int y;
    int z;
};

// Convert a flat array index into (x, y, z) coordinates for an n^3 cube.
__device__ struct index unravel_idx(int idx, int n) {
    struct index unravel;
    unravel.z = idx % n;
    unravel.y = (idx / n) % n;
    unravel.x = (idx / n) / n;
    return unravel;
}

// Convert (x, y, z) coordinates back into a flat array index.
__device__ int ravel_idx(struct index idx, int n) {
    return idx.x * n * n + idx.y * n + idx.z;
}

// Life rules: a live cell survives with 4 or 5 live neighbours;
// a dead cell becomes alive with exactly 5.
__device__ int should_live(int is_alive, int alive_count) {
    if (is_alive == 1) {
        if (alive_count < 4 || alive_count > 5) {
            return 0;
        }
    } else {
        if (alive_count == 5) {
            return 1;
        }
    }
    return is_alive;
}

// One evolution step: out_arr[i] = rule(cell_arr[i], live neighbour count).
// Out-of-bounds neighbours count as dead.
__global__ void evolve_kernel(int *cell_arr, int *out_arr, int n)
{
    int num_elem = n * n * n;
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    // grid-stride loop over all cells
    for (int idx = start; idx < num_elem; idx += blockDim.x * gridDim.x) {
        struct index idx_3d = unravel_idx(idx, n);
        int alive_count = 0;
        // candidate neighbour coordinates along each axis
        int adj_x[] = {idx_3d.x - 1, idx_3d.x, idx_3d.x + 1};
        int adj_y[] = {idx_3d.y - 1, idx_3d.y, idx_3d.y + 1};
        int adj_z[] = {idx_3d.z - 1, idx_3d.z, idx_3d.z + 1};
        // count live cells in the 3x3x3 neighbourhood, excluding self
        for (int i = 0; i < 3; i++) {
            for (int j = 0; j < 3; j++) {
                for (int k = 0; k < 3; k++) {
                    struct index _idx;
                    _idx.x = adj_x[i];
                    _idx.y = adj_y[j];
                    _idx.z = adj_z[k];
                    if ((_idx.x > -1 && _idx.x < n) &&
                        (_idx.y > -1 && _idx.y < n) &&
                        (_idx.z > -1 && _idx.z < n)) {
                        int adj_idx = ravel_idx(_idx, n);
                        if (adj_idx != idx) {
                            alive_count += cell_arr[adj_idx];
                        }
                    }
                }
            }
        }
        // Bug fix: the original read cell_arr[current_idx] and wrote
        // out_arr[current_idx] (the thread's FIRST index) instead of idx,
        // so every grid-stride iteration after the first evaluated the
        // rule for, and clobbered, the wrong cell.
        out_arr[idx] = should_live(cell_arr[idx], alive_count);
    }
}

// Host wrapper: stage cell_arr into unified memory, run one evolution
// step on the GPU, copy the result into out_arr.
void evolve(int *cell_arr, int *out_arr, int n)
{
    int *_in, *_out;
    int num_elem = n * n * n;

    // allocate unified memory
    cudaMallocManaged(&_in, num_elem * sizeof(int));
    cudaMallocManaged(&_out, num_elem * sizeof(int));

    // copy input
    for (int i = 0; i < num_elem; i++) {
        _in[i] = cell_arr[i];
    }

    int threadsPerBlock = 256;
    // Bug fix: proper ceiling division (the original launched one extra
    // block whenever num_elem was a multiple of the block size; harmless
    // with the grid-stride loop, but wasteful).
    int blocks = (num_elem + threadsPerBlock - 1) / threadsPerBlock;
    evolve_kernel<<<blocks, threadsPerBlock>>>(_in, _out, n);

    // sync with GPU before reading results
    cudaDeviceSynchronize();
    cudaFree(_in);

    // copy output
    for (int i = 0; i < num_elem; i++) {
        out_arr[i] = _out[i];
    }
    cudaFree(_out);
}
21,934
#include <cuda.h>
#include <stdio.h>

// Probe how cudaPointerGetAttributes reacts to NULL arguments and report
// the raw error code it returns.
int main(int argc, char** argv)
{
    struct cudaPointerAttributes* attrs = (struct cudaPointerAttributes*) 0;
    void* probe = (void*) 0;

    cudaError_t status = cudaPointerGetAttributes(attrs, probe);
    printf("Error: %d\n", status);
    return 0;
}
21,935
#include "includes.h"

using namespace std;

#define MAX_ARRAY_SIZE 1024
#define RANDOM_MAX 1000
#define TILE_DIM 16
#define BLOCK_ROWS 8
#define EPSILON 0.000001
#define NUM_BLOCKS (MAX_ARRAY_SIZE/TILE_DIM)

float A[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];
float C[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];

void serial();
void init_F();
int check();

// Tiled matrix transpose: C = F^T.
// Expected launch: grid of (width/TILE_DIM, width/TILE_DIM) blocks of
// (TILE_DIM, BLOCK_ROWS) threads; each block moves one TILE_DIM x TILE_DIM
// tile through shared memory so both the load and the store stay coalesced.
__global__ void matrixTranspose2(const float *F, float *C)
{
    // Improvement: +1 column of padding removes shared-memory bank
    // conflicts on the column-wise reads in the write-back loop below
    // (behavior is unchanged; only the bank mapping differs).
    __shared__ float tile[TILE_DIM][TILE_DIM + 1];

    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;

    // Each thread loads TILE_DIM/BLOCK_ROWS rows of the tile.
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        tile[threadIdx.y + j][threadIdx.x] = F[(y + j) * width + x];

    __syncthreads();

    // Swap block coordinates so the output tile lands at the transposed
    // position, then write out the tile's columns as rows.
    x = blockIdx.y * TILE_DIM + threadIdx.x;
    y = blockIdx.x * TILE_DIM + threadIdx.y;

    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        C[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j];
}
21,936
#include <cstdio>

#if defined(NDEBUG)
#define CUDA_CHECK(x) (x)
#else
#define CUDA_CHECK(x) do {\
    (x); \
    cudaError_t e = cudaGetLastError(); \
    if (cudaSuccess != e) { \
        printf("cuda failure \"%s\" at %s:%d\n", \
            cudaGetErrorString(e), \
            __FILE__, __LINE__); \
        exit(1); \
    } \
} while (0)
#endif

// Device kernel (compiled by NVCC): element-wise sum of two square
// matrices, one thread per element, indexed row-major as [y][x].
__global__ void addKernel(int* c, const int* a, const int* b)
{
    int idx = threadIdx.y * blockDim.x + threadIdx.x;  // [y][x] = y * WIDTH + x
    c[idx] = a[idx] + b[idx];
}

// Host program (compiled by MS-VC++).
int main(void)
{
    const int WIDTH = 5;
    const size_t bytes = WIDTH * WIDTH * sizeof(int);

    // host-side matrices
    int a[WIDTH][WIDTH];
    int b[WIDTH][WIDTH];
    int c[WIDTH][WIDTH] = { 0 };

    // fill the inputs with recognizable values
    for (int row = 0; row < WIDTH; ++row) {
        for (int col = 0; col < WIDTH; ++col) {
            a[row][col] = row * 10 + col;
            b[row][col] = (row * 10 + col) * 100;
        }
    }

    // device-side buffers
    int* dev_a = 0;
    int* dev_b = 0;
    int* dev_c = 0;
    CUDA_CHECK( cudaMalloc((void**)&dev_a, bytes) );
    CUDA_CHECK( cudaMalloc((void**)&dev_b, bytes) );
    CUDA_CHECK( cudaMalloc((void**)&dev_c, bytes) );

    // host -> device
    CUDA_CHECK( cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice) );
    CUDA_CHECK( cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice) );

    // one thread per element: dev_c = dev_a + dev_b
    dim3 dimBlock(WIDTH, WIDTH, 1);  // x, y, z
    addKernel <<< 1, dimBlock>>>(dev_c, dev_a, dev_b);
    CUDA_CHECK( cudaPeekAtLastError() );

    // device -> host (cudaMemcpy synchronizes with the kernel)
    CUDA_CHECK( cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost) );

    // release device memory
    CUDA_CHECK( cudaFree(dev_c) );
    CUDA_CHECK( cudaFree(dev_a) );
    CUDA_CHECK( cudaFree(dev_b) );

    // show the result
    for (int row = 0; row < WIDTH; ++row) {
        for (int col = 0; col < WIDTH; ++col) {
            printf("%5d", c[row][col]);
        }
        printf("\n");
    }
    return 0;
}
21,937
#include <stdio.h>
#include <cuda_runtime.h>

// Device-global scratch string the kernel stages data through.
__device__ char xx[23];

// Copies `end` bytes of s1 into xx starting at xx[start+1], staging the
// original xx contents through shared memory and a local buffer.
// NOTE(review): every thread of a block performs the same overlapping
// copies, so the result is deterministic only because all threads write
// identical bytes; caller must guarantee start + end + 1 <= 23.
__global__ void cuCopyTest(char *s1, int start, int end)
{
    // Bug fix: s2 was declared __shared__ at file scope, which CUDA
    // forbids -- __shared__ variables must be declared inside device
    // code. Its lifetime is per-block either way.
    __shared__ char s2[23];
    char out[23];
    char * dest;
    char * src;
    int n = end;

    // initialize shared memory s2 from xx
    dest = &s2[start];
    n = end;
    src = &xx[start];
    while (n-- > 0)
        *dest++ = *src++;
    __syncthreads();

    // overwrite xx[start+1 .. start+end] with the input string
    dest = (&xx[0] + start) + 1;
    n = end;
    src = s1;
    while (n-- > 0)
        *dest++ = *src++;

    // copy the staged bytes into a local buffer (written but never read;
    // kept to preserve the original memory traffic)
    dest = (&out[0]) + start;
    n = end;
    src = &s2[start];
    while (n-- > 0)
        *dest++ = *src++;
}
21,938
#include <stdio.h>
#include <cuda.h>

#define N 100
#define BLOCKSIZE 32

// Fill input[0..N-1] with 1..N, one element per thread.
__global__ void init(int *input)
{
    unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id < N)
        input[id] = id + 1;
}

// Debug helper: print the whole array from a single thread.
__global__ void print(int *output)
{
    for (unsigned ii = 0; ii < N; ++ii)
        printf("%d ", output[ii]);
    printf("\n");
}

// 1-D convolution with an odd-sized filter centred on each element; the
// filter window is clipped at the array boundaries (no padding).
__global__ void convolution(int *input, int *filter, int *output, int fsize)
{
    unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id >= N)
        return;

    int sum = 0;
    int halff = fsize / 2;
    // [istart, iend) is the clipped input window; [fstart, fend) is the
    // matching range of filter taps.
    int istart = id - halff, iend = id + halff + 1;
    int fstart = 0, fend = fsize;
    if (istart < 0) {
        fstart -= istart;
        istart = 0;
    }
    if (iend > N) {
        fend -= (iend - N);
        iend = N;
    }
    // Fix: loop variable was `unsigned` while fstart/fend are signed;
    // keep everything signed to avoid wraparound comparisons.
    for (int ii = fstart; ii < fend; ++ii) {
        sum += input[istart + ii - fstart] * filter[ii];
    }
    output[id] = sum;
}

int main()
{
    int *input, *filter, *output;
    int hf[] = {3, 4, 5, 4, 3};
    int fsize = sizeof(hf) / sizeof(*hf);
    // The clipping logic assumes a symmetric window, so the filter length
    // must be odd.
    if (fsize % 2 == 0) {
        printf("Error: Filter size (%d) is even.\n", fsize);
        exit(1);
    }
    cudaMalloc(&input, N * sizeof(int));
    cudaMalloc(&filter, fsize * sizeof(int));
    cudaMalloc(&output, N * sizeof(int));
    cudaMemcpy(filter, hf, fsize * sizeof(int), cudaMemcpyHostToDevice);

    int nblocks = (N + BLOCKSIZE - 1) / BLOCKSIZE;
    init<<<nblocks, BLOCKSIZE>>>(input);
    convolution<<<nblocks, BLOCKSIZE>>>(input, filter, output, fsize);
    print<<<1, 1>>>(output);
    cudaDeviceSynchronize();

    // Bug fix: device allocations were leaked.
    cudaFree(input);
    cudaFree(filter);
    cudaFree(output);
    return 0;
}
21,939
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Corrector_gpu.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

// Candidate block shapes and problem sizes to sweep in the benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Benchmark harness: launches Corrector_gpu over a sweep of problem sizes
// and block shapes, timing 1000 back-to-back launches per configuration.
// argv[1] = number of matrix sizes to sweep (<= 7).
int main(int argc, char **argv)
{
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            size_t nElems = (size_t)XSIZE * YSIZE;

            double GTIME = 1;
            // Bug fix: the original passed XSIZE*YSIZE as the cudaMalloc
            // byte count for every buffer, under-allocating by a factor
            // of sizeof(element) (8x for double, 32x for double4).
            double *local_time = NULL;
            cudaMalloc(&local_time, nElems * sizeof(double));
            double *step = NULL;
            cudaMalloc(&step, nElems * sizeof(double));
            int *next = NULL;
            cudaMalloc(&next, nElems * sizeof(int));
            unsigned long nextsize = 1;
            double4 *pos_CH = NULL;
            cudaMalloc(&pos_CH, nElems * sizeof(double4));
            double4 *vel_CH = NULL;
            cudaMalloc(&vel_CH, nElems * sizeof(double4));
            double4 *a_tot_D = NULL;
            cudaMalloc(&a_tot_D, nElems * sizeof(double4));
            double4 *a1_tot_D = NULL;
            cudaMalloc(&a1_tot_D, nElems * sizeof(double4));
            double4 *a2_tot_D = NULL;
            cudaMalloc(&a2_tot_D, nElems * sizeof(double4));
            double4 *a_H0 = NULL;
            cudaMalloc(&a_H0, nElems * sizeof(double4));
            double4 *a3_H = NULL;
            cudaMalloc(&a3_H, nElems * sizeof(double4));
            double ETA6 = 1;
            double ETA4 = 1;
            double DTMAX = 1;
            double DTMIN = 1;
            unsigned int N = 1;

            // Round the problem size up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            cudaFree(0);  // force context creation before any timing
            // Cold-start launch + sync so one-time setup cost is excluded.
            Corrector_gpu<<<gridBlock,threadBlock>>>(GTIME,local_time,step,next,nextsize,pos_CH,vel_CH,a_tot_D,a1_tot_D,a2_tot_D,a_H0,a3_H,ETA6,ETA4,DTMAX,DTMIN,N);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                Corrector_gpu<<<gridBlock,threadBlock>>>(GTIME,local_time,step,next,nextsize,pos_CH,vel_CH,a_tot_D,a1_tot_D,a2_tot_D,a_H0,a3_H,ETA6,ETA4,DTMAX,DTMIN,N);
            }
            // NOTE(review): there is no synchronization before `end`, so
            // this measures launch enqueue rate rather than kernel
            // runtime -- preserved from the original harness.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                Corrector_gpu<<<gridBlock,threadBlock>>>(GTIME,local_time,step,next,nextsize,pos_CH,vel_CH,a_tot_D,a1_tot_D,a2_tot_D,a_H0,a3_H,ETA6,ETA4,DTMAX,DTMIN,N);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;

            // Bug fix: device buffers were leaked on every sweep iteration.
            cudaFree(local_time);
            cudaFree(step);
            cudaFree(next);
            cudaFree(pos_CH);
            cudaFree(vel_CH);
            cudaFree(a_tot_D);
            cudaFree(a1_tot_D);
            cudaFree(a2_tot_D);
            cudaFree(a_H0);
            cudaFree(a3_H);
        }
    }
}
21,940
// Note that in this model we do not check
// the error codes and status of kernel call.
#include <cstdio>
#include <cmath>

// Minimal device kernel: prints one greeting from the GPU.
__global__ void hello()
{
    printf("Greetings from your GPU\n");
}

int main(void)
{
    int count, device;

    // Query how many GPUs exist and which one is current.
    cudaGetDeviceCount(&count);
    cudaGetDevice(&device);
    printf("You have in total %d GPUs in your system\n", count);
    printf("GPU %d will now print a message for you:\n", device);

    // Single-thread launch; synchronize so the device-side printf is
    // flushed before the process exits.
    hello<<<1,1>>>();
    cudaDeviceSynchronize();
    return 0;
}
21,941
/* Copy the transpose of matrix h_a[F][C] into matrix h_b[C][F], even when
   the number of threads per block does not evenly divide the number of
   matrix components. */
#include <stdio.h>
#define F 25   // original matrix: F rows
#define C 43   // ... and C columns
#define H 16   // blocks of H x H threads (HxH <= 512, compute capability 1.3)

// Tiled transpose kernel: dev_b (cols x filas) = transpose of dev_a
// (filas x cols), staged through a shared-memory tile per block.
__global__ void trspta2(int *dev_a, int *dev_b, int filas, int cols)
{
  __shared__ int s[H*H]; // shared tile: one component per thread of the block
  int bbx = blockIdx.x * blockDim.x; // = blockIdx.x * H
  int bby = blockIdx.y * blockDim.y; // = blockIdx.y * H
  int ix = bbx + threadIdx.x;
  int iy = bby + threadIdx.y;
  int aux;
  int idt = threadIdx.y * blockDim.x + threadIdx.x;   // thread id within the block (row-major)
  int idttr = threadIdx.x * blockDim.y + threadIdx.y; // transposed thread id
  if ((ix<cols)&&(iy<filas)) {
    /* If S[H][H] is the matrix represented by s, we want to store in S the
       transpose of the dev_a sub-matrix read by this thread block (so that,
       afterwards, S can be placed at the right spot of dev_b) */
    aux = iy*cols+ix; // position (iy, ix) in the matrix represented by dev_a
    /* Within a block, when idt grows by 1 and threadIdx.y does not change
       (threadIdx.x grows by 1), aux grows by 1. This happens in runs of H
       consecutive threads, since 0 <= threadIdx.x < blockDim.x = H.
       Therefore, indexing with aux, global-memory accesses coalesce every H
       accesses. As maximal coalescing is 16 accesses, H=16 is best. */
    s[idttr] = dev_a[aux]; /* S[threadIdx.x][threadIdx.y] = A[iy][ix]
                              (s[idttr] represents S[threadIdx.x][threadIdx.y]) */
  }
  /* Now we must copy s to its place in dev_b. The top-left corner of s
     corresponds to position (ix, iy) in dev_b with
     threadIdx.x = threadIdx.y = 0, i.e. ix*filas+iy with
     threadIdx.x = threadIdx.y = 0. That is, bbx * filas + bby */
  int esqsupizda = bbx * filas + bby;
  /* Viewing s as a matrix, a sweep with index idt is a row-major sweep;
     in S we would select S[threadIdx.y][threadIdx.x].
     Therefore, in dev_b the index must be:
     esqsupizda + threadIdx.y * filas + threadIdx.x */
  __syncthreads(); // wait for every thread to finish loading the tile
  if (((bbx+threadIdx.y)<cols) && ((bby+threadIdx.x)<filas))
    dev_b[esqsupizda + threadIdx.y * filas + threadIdx.x] = s[idt];
  /* The bounds of the if change to account for the transposition performed. */
}

int main(int argc, char** argv)
{
  int h_a[F][C], h_b[C][F];
  int *d_a, *d_b;
  int i, j, aux, size = F * C * sizeof(int);
  dim3 hebrasBloque(H, H);    // blocks of H x H threads
  int numBlf = (F+H-1)/H;     // ceil(F/H)
  int numBlc = (C+H-1)/H;     // ceil(C/H)
  dim3 numBloques(numBlc,numBlf);
  // allocate device memory for d_a and d_b
  cudaMalloc((void**) &d_a, size);
  cudaMalloc((void**) &d_b, size);
  // fill h_a on the CPU and print its values
  printf("\nMatriz origen\n");
  for (i=0; i<F; i++) {
    for (j=0; j<C; j++) {
      aux = i*C+j;
      h_a[i][j] = aux;
      printf("%d ", aux);
    }
    printf("\n");
  }
  // copy matrix h_a into d_a
  cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
  // launch the kernel that writes the transpose of d_a into d_b
  trspta2<<<numBloques, hebrasBloque>>>(d_a, d_b, F, C);
  // copy matrix d_b into h_b
  cudaMemcpy(h_b, d_b, size, cudaMemcpyDeviceToHost);
  // with the results on the host, verify they are correct
  for (i=0; i<F; i++)
    for (j=0; j<C; j++)
      if (h_a[i][j]!= h_b[j][i]) {printf("error en componente %d %d de matriz de entrada \n", i,j); break;}
  // print the result matrix
  printf("\nMatriz resultado\n");
  for (i=0; i<C; i++) {
    for (j=0; j<F; j++)
      printf("%d ", h_b[i][j]);
    printf("\n");
  }
  printf("\n");
  cudaFree(d_a);
  cudaFree(d_b);
  return 0;
}
21,942
#include "includes.h"

extern "C" {
}

// Element-wise scaled-quotient update: c[i] += alpha * a[i] / b[i]
// for the first lengthA elements, one thread per element.
__global__ void vdivupdate(const int lengthA, const double alpha, const double *a, const double *b, double *c)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= lengthA)
        return;  // guard against the grid over-covering the array
    c[tid] += alpha*a[tid] / b[tid];
}
21,943
#include"stdio.h"
#include<cuda_runtime.h>
#include <sys/time.h>

#define len 1
#define WIDTH 128

// Returns a mutable reference to T[y][x] in a row-major, w-wide grid.
__device__ float& getPos(float *T,int x,int y,int w)
{
    return *(T+y*w+x);
}

// 2-D heat-flow relaxation on a square field:
//        dN
//   dW   dT   dE
//        dS
// Each thread owns interior cell (x, y) = (blockIdx.x+1, threadIdx.x+1)
// and applies 100000 explicit finite-difference updates.
// NOTE(review): __syncthreads() only synchronizes within one block, but
// x-neighbours live in different blocks here (one block per column), so
// reads of x-1 / x+1 race with their updates across blocks -- confirm the
// relaxation is intentionally asynchronous.
__global__ void Calc_Cell(float* T0)
{
    float dW,dE,dN,dS,dT;
    int x=blockIdx.x+1;
    int y=threadIdx.x+1;
    int w=WIDTH;
    for(int i=0;i<100000;i++)
    {
        dN=(getPos(T0,x,y,w)-getPos(T0,x,y-1,w))/len;
        dS=(getPos(T0,x,y+1,w)-getPos(T0,x,y,w))/len;
        dW=(getPos(T0,x,y,w)-getPos(T0,x-1,y,w))/len;
        dE=(getPos(T0,x+1,y,w)-getPos(T0,x,y,w))/len;
        dT=((dS-dN)/len+(dE-dW)/len)*0.1;
        __syncthreads();
        getPos(T0,x,y,w)=getPos(T0,x,y,w)+dT;
    }
}

// Wall-clock time in microseconds. (Storage is row-major even though the
// coordinates read column-major.)
long getCurrentTime()
{
    struct timeval tv;
    gettimeofday(&tv,NULL);
    return tv.tv_sec * 1000000 + tv.tv_usec;
}

int main()
{
    FILE *fp=fopen("a.txt","w");
    if (fp == NULL) {             // bug fix: fopen result was never checked
        perror("fopen");
        return 1;
    }
    size_t size=128*WIDTH*sizeof(float);
    float*d_A;
    cudaMalloc(&d_A, size);
    float A[128*WIDTH]={0};
    // Boundary condition: the first row is held at 100 degrees.
    for(int i=0;i<128;i++)
        A[i]=100;
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    // Given fixed boundary conditions, only the 126x126 interior evolves.
    Calc_Cell<<<126,126>>>(d_A);
    cudaMemcpy(A, d_A, size, cudaMemcpyDeviceToHost);
    for(int i=0;i<128;i++)
    {
        for(int j=0;j<WIDTH;j++)
        {
            fprintf(fp,"%d %d %f \n",i,j,A[i*WIDTH+j]);
        }
    }
    fclose(fp);                   // bug fix: output file was never closed
    cudaFree(d_A);
}
21,944
#include <cuda_runtime_api.h>
// FIXME(20160123): commentng out for cuda 7.0.
//#include <cuda_fp16.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Stand-in for the real half type while cuda_fp16.h is disabled: raw
// 16-bit storage only, no arithmetic.
typedef uint16_t half;

// Staggers a shared-memory index by one slot every 32 elements to spread
// accesses across banks.
#define BANK_OFFSET(idx) ({ __typeof__ (idx) _idx = idx; ((_idx) + ((_idx) / 32)); })

// Debug kernel: print the first n int32 elements.
__global__ void map_print_i32_kernel(
    const int32_t *src,
    int n)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) {
    printf("DEBUG: print: [%d] %d\n", i, src[i]);
  }
}

extern "C" void array_cuda_map_print_i32(
    const int32_t *src,
    int n,
    cudaStream_t stream)
{
  // Bug fix: the element count was hard-coded to 32 although the grid was
  // sized from n; for n < 32 the extra threads read past the end of src.
  map_print_i32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      src, n);
}

// Debug kernel: print the first n float elements.
__global__ void map_print_f32_kernel(
    const float *src,
    int n)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) {
    printf("DEBUG: print: [%d] %g\n", i, src[i]);
  }
}

extern "C" void array_cuda_map_print_f32(
    const float *src,
    int n,
    cudaStream_t stream)
{
  // Bug fix: same hard-coded 32 as the i32 variant; pass n through.
  map_print_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      src, n);
}

// src[i] = c for the first n int32 elements.
__global__ void map_set_constant_i32_kernel(
    int32_t *src,
    int n,
    int32_t c)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) {
    src[i] = c;
  }
}

extern "C" void array_cuda_map_set_constant_i32(
    int32_t *src,
    int n,
    int32_t c,
    cudaStream_t stream)
{
  map_set_constant_i32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      src, n, c);
}

// src[i] = c for the first n float elements.
__global__ void map_set_constant_f32_kernel(
    float *src,
    int n,
    float c)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) {
    src[i] = c;
  }
}

extern "C" void array_cuda_map_set_constant_f32(
    float *src,
    int n,
    float c,
    cudaStream_t stream)
{
  map_set_constant_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      src, n, c);
}

// Unpack 4 bytes per 32-bit word into 4 consecutive floats (0..255).
// vn = number of words; n = number of output elements (guards the tail).
__global__ void map_cast_u8_to_f32_v(
    const uint32_t *vsrc,
    int vn,
    float *dst,
    int n)
{
  int vi = threadIdx.x + blockIdx.x * blockDim.x;
  int i0 = 4 * vi;
  int i1 = i0 + 1;
  int i2 = i0 + 2;
  int i3 = i0 + 3;
  if (vi < vn) {
    uint32_t v = vsrc[vi];
    float x0 = (float)(v & 0xff);
    float x1 = (float)((v >> 8) & 0xff);
    float x2 = (float)((v >> 16) & 0xff);
    float x3 = (float)((v >> 24) & 0xff);
    if (i0 < n) { dst[i0] = x0; }
    if (i1 < n) { dst[i1] = x1; }
    if (i2 < n) { dst[i2] = x2; }
    if (i3 < n) { dst[i3] = x3; }
  }
}

// Shared-memory variant of the above. NOTE(review): dead code (no wrapper
// calls it); __syncthreads() sits inside the divergent `if (vi < vn)`
// branch, and the read indices do not mirror the 4*threadIdx.x write
// layout -- fix before enabling. Preserved unchanged.
__global__ void map_cast_u8_to_f32_vs(
    const uint32_t *vsrc,
    int vn,
    float *dst,
    int n)
{
  __shared__ float cache[4 * (1024 + 32)];
  int vi = threadIdx.x + blockIdx.x * blockDim.x;
  int i0 = threadIdx.x + 4 * blockIdx.x * blockDim.x;
  int i1 = i0 + 1024;
  int i2 = i0 + 2 * 1024;
  int i3 = i0 + 3 * 1024;
  if (vi < vn) {
    uint32_t v = vsrc[vi];
    cache[BANK_OFFSET(4 * threadIdx.x)] = (float)(v & 0xff);
    cache[BANK_OFFSET(4 * threadIdx.x + 1)] = (float)((v >> 8) & 0xff);
    cache[BANK_OFFSET(4 * threadIdx.x + 2)] = (float)((v >> 16) & 0xff);
    cache[BANK_OFFSET(4 * threadIdx.x + 3)] = (float)((v >> 24) & 0xff);
    __syncthreads();
    if (i0 < n) { dst[i0] = cache[BANK_OFFSET(threadIdx.x)]; }
    if (i1 < n) { dst[i1] = cache[BANK_OFFSET(threadIdx.x + 1024)]; }
    if (i2 < n) { dst[i2] = cache[BANK_OFFSET(threadIdx.x + 2 * 1024)]; }
    if (i3 < n) { dst[i3] = cache[BANK_OFFSET(threadIdx.x + 3 * 1024)]; }
  }
}

extern "C" void array_cuda_map_cast_u8_to_f32_vec(
    const uint8_t *src,
    int n,
    float *dst,
    cudaStream_t stream)
{
  int vn = (n+3)/4;  // words needed to cover n bytes
  map_cast_u8_to_f32_v<<<(vn+1024-1)/1024, 1024, 0, stream>>>(
      (const uint32_t *)src, vn, dst, n);
}

// As map_cast_u8_to_f32_v, additionally normalizing each byte to [0, 1].
__global__ void map_cast_u8_to_f32_v_n(
    const uint32_t *vsrc,
    int vn,
    float *dst,
    int n)
{
  int vi = threadIdx.x + blockIdx.x * blockDim.x;
  int i0 = 4 * vi;
  int i1 = i0 + 1;
  int i2 = i0 + 2;
  int i3 = i0 + 3;
  if (vi < vn) {
    uint32_t v = vsrc[vi];
    float x0 = (float)(v & 0xff) / 255.0f;
    float x1 = (float)((v >> 8) & 0xff) / 255.0f;
    float x2 = (float)((v >> 16) & 0xff) / 255.0f;
    float x3 = (float)((v >> 24) & 0xff) / 255.0f;
    if (i0 < n) { dst[i0] = x0; }
    if (i1 < n) { dst[i1] = x1; }
    if (i2 < n) { dst[i2] = x2; }
    if (i3 < n) { dst[i3] = x3; }
  }
}

// Shared-memory, normalized variant. NOTE(review): dead code with the
// same divergent-__syncthreads and index-mismatch issues as
// map_cast_u8_to_f32_vs. Preserved unchanged.
__global__ void map_cast_u8_to_f32_vs_n(
    const uint32_t *vsrc,
    int vn,
    float *dst,
    int n)
{
  __shared__ float cache[4 * (1024 + 32)];
  int vi = threadIdx.x + blockIdx.x * blockDim.x;
  int i0 = threadIdx.x + 4 * blockIdx.x * blockDim.x;
  int i1 = i0 + 1024;
  int i2 = i0 + 2 * 1024;
  int i3 = i0 + 3 * 1024;
  if (vi < vn) {
    uint32_t v = vsrc[vi];
    cache[BANK_OFFSET(4 * threadIdx.x)] = (float)(v & 0xff) / 255.0f;
    cache[BANK_OFFSET(4 * threadIdx.x + 1)] = (float)((v >> 8) & 0xff) / 255.0f;
    cache[BANK_OFFSET(4 * threadIdx.x + 2)] = (float)((v >> 16) & 0xff) / 255.0f;
    cache[BANK_OFFSET(4 * threadIdx.x + 3)] = (float)((v >> 24) & 0xff) / 255.0f;
    __syncthreads();
    if (i0 < n) { dst[i0] = cache[BANK_OFFSET(threadIdx.x)]; }
    if (i1 < n) { dst[i1] = cache[BANK_OFFSET(threadIdx.x + 1024)]; }
    if (i2 < n) { dst[i2] = cache[BANK_OFFSET(threadIdx.x + 2 * 1024)]; }
    if (i3 < n) { dst[i3] = cache[BANK_OFFSET(threadIdx.x + 3 * 1024)]; }
  }
}

extern "C" void array_cuda_map_cast_u8_to_f32_vec_norm(
    const uint8_t *src,
    int n,
    float *dst,
    cudaStream_t stream)
{
  int vn = (n+3)/4;
  map_cast_u8_to_f32_v_n<<<(vn+1024-1)/1024, 1024, 0, stream>>>(
      (const uint32_t *)src, vn, dst, n);
}

// NOTE(review): placeholder while cuda_fp16.h is disabled -- writes 0
// instead of the converted value.
__global__ void map_cast_f16_to_f32_kernel(
    const half *src,
    int n,
    float *dst)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) {
    half x = src[i];
    //float y = __half2float(x);
    float y = 0;
    dst[i] = y;
  }
}

extern "C" void array_cuda_map_cast_f16_to_f32(
    const half *src,
    int n,
    float *dst,
    cudaStream_t stream)
{
  map_cast_f16_to_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      src, n, dst);
}

// NOTE(review): placeholder while cuda_fp16.h is disabled -- writes 0
// instead of the converted value.
__global__ void map_cast_f32_to_f16_kernel(
    const float *src,
    int n,
    half *dst)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) {
    float x = src[i];
    //half y = __float2half(x);
    half y = 0;
    dst[i] = y;
  }
}

extern "C" void array_cuda_map_cast_f32_to_f16(
    const float *src,
    int n,
    half *dst,
    cudaStream_t stream)
{
  map_cast_f32_to_f16_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      src, n, dst);
}

// dst[i] += src[i] for the first n int32 elements.
__global__ void map_add_i32_kernel(
    const int32_t *src,
    int n,
    int32_t *dst)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) {
    dst[i] = dst[i] + src[i];
  }
}

extern "C" void array_cuda_map_add_i32(
    const int32_t *src,
    int n,
    int32_t *dst,
    cudaStream_t stream)
{
  map_add_i32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      src, n, dst);
}

// Generalized axpby: dst[i] = beta*dst[i] + alpha*src[i].
__global__ void map_add_f32_kernel(
    const float alpha,
    const float *src,
    int n,
    const float beta,
    float *dst)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) {
    dst[i] = beta * dst[i] + alpha * src[i];
  }
}

extern "C" void array_cuda_map_add_f32(
    const float alpha,
    const float *src,
    int n,
    const float beta,
    float *dst,
    cudaStream_t stream)
{
  map_add_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      alpha, src, n, beta, dst);
}

/*__global__ void map_add_f16_as_f32(
    const half *src,
    int n,
    int n2,
    half *dst)
{
  int i2 = threadIdx.x + blockIdx.x * blockDim.x;
  if (i2 < n2) {
    int i = 2 * i2;
    if (i + 1 < n) {
      half2 x16 = ((const half2 *)src)[i2];
      half2 y16 = ((half2 *)dst)[i2];
      float2 x32 = __half22float2(x16);
      float2 y32 = __half22float2(y16);
      y32.x += x32.x;
      y32.y += x32.y;
      half2 z16 = __float22half2_rn(y32);
      ((half2 *)dst)[i2] = z16;
    } else {
      half x16 = src[i];
      half y16 = dst[i];
      float x32 = __half2float(x16);
      float y32 = __half2float(y16);
      y32 += x32;
      half z16 = __float2half(y32);
      dst[i] = z16;
    }
  }
}*/

// Disabled along with cuda_fp16.h; traps if called.
extern "C" void array_cuda_map_add_f16_as_f32(
    const half *src,
    int n,
    half *dst,
    cudaStream_t stream)
{
  assert(0);
  /*int n2 = (n + 1) / 2;
  map_add_f16_as_f32<<<(n2+1024-1)/1024, 1024, 0, stream>>>(
      src, n, n2, dst);*/
}
21,945
#include "includes.h"

// One Jacobi iteration for Poisson image cloning.
// Images are interleaved RGB (3 floats per pixel), wt x ht.
// `fixed` holds the per-pixel constant term of the linear system,
// `mask` selects which pixels are solved (> 127 means inside the clone
// region), buf1 is the current iterate and buf2 receives the next one.
// Launch with a 2-D grid covering at least wt x ht threads.
__global__ void PoissonImageCloningIteration( const float *fixed, const float *mask, const float *buf1, float *buf2, const int wt, const int ht )
{
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	const int curt = wt*yt+xt; // flat pixel index
	// Only update in-image pixels the mask marks as inside.
	if (yt < ht and xt < wt and mask[curt] > 127.0f) {
		// Which of the four neighbours fall outside the image?
		bool nt_bnd = (yt == 0), wt_bnd = (xt == 0), st_bnd = (yt == ht-1), et_bnd = (xt == wt-1);
		// Neighbour indices, clamped to the current pixel on the border.
		int North_t = (nt_bnd)? curt:(curt-wt);
		int West_t = (wt_bnd)? curt:(curt-1);
		int South_t = (st_bnd)? curt:(curt+wt);
		int East_t = (et_bnd)? curt:(curt+1);
		// A neighbour is "masked out" if it is off-image or outside the
		// clone region (mask <= 127); such neighbours are not read from
		// buf1 below.
		bool isMasked_n = (nt_bnd)? true:(mask[North_t] <= 127.0f);
		bool isMasked_w = (wt_bnd)? true:(mask[West_t] <= 127.0f);
		bool isMasked_s = (st_bnd)? true:(mask[South_t] <= 127.0f);
		bool isMasked_e = (et_bnd)? true:(mask[East_t] <= 127.0f);
		// Start from the constant term for each colour channel...
		buf2[curt*3+0] = fixed[curt*3+0];
		buf2[curt*3+1] = fixed[curt*3+1];
		buf2[curt*3+2] = fixed[curt*3+2];
		// ...accumulate in-region neighbours from the previous iterate...
		if(!isMasked_n) {
			buf2[curt*3+0] += buf1[North_t*3+0];
			buf2[curt*3+1] += buf1[North_t*3+1];
			buf2[curt*3+2] += buf1[North_t*3+2];
		}
		if(!isMasked_w) {
			buf2[curt*3+0] += buf1[West_t*3+0];
			buf2[curt*3+1] += buf1[West_t*3+1];
			buf2[curt*3+2] += buf1[West_t*3+2];
		}
		if(!isMasked_s) {
			buf2[curt*3+0] += buf1[South_t*3+0];
			buf2[curt*3+1] += buf1[South_t*3+1];
			buf2[curt*3+2] += buf1[South_t*3+2];
		}
		if(!isMasked_e) {
			buf2[curt*3+0] += buf1[East_t*3+0];
			buf2[curt*3+1] += buf1[East_t*3+1];
			buf2[curt*3+2] += buf1[East_t*3+2];
		}
		// ...and divide by 4 (Jacobi step of the 4-neighbour Laplacian).
		buf2[curt*3+0] *= 0.25f;
		buf2[curt*3+1] *= 0.25f;
		buf2[curt*3+2] *= 0.25f;
	}
}
21,946
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <vector>

/**
 * CUDA string match: Sunday algorithm.
 * The main idea is to use the GPU (CUDA) to accelerate the Sunday
 * algorithm: the text is split into overlapping substrings of length
 * STRLEN and one GPU thread runs Sunday on each substring. Random-data
 * experiments show roughly a ten-times speedup.
 *
 * @author gaozhefeng XIDIAN UNIVERSITY
 */

// the block size of the one-dimensional block
#define BLOCKSIZE 256
// the length of each substring handled by one thread
#define STRLEN 64
// the default size of the text
#define DEFAULT_SIZE 16777216

using namespace std;

// generate a random text of uppercase letters
void initial(char * text, const int n);
// build the Sunday shift table from the pattern
void create_next(int * next, const char * pattern);
// pad the raw text into fixed-length substrings for the GPU
void pad_text(int * host_text, int size, char * text, int len_t, int boundry);
// the Sunday algorithm on the CPU
void sunday(vector<int> &location, const char* text, int len_t, const char* pattern, int len_p, const int * next);
// the Sunday algorithm on the GPU
__global__ void sunday_kernel(int * dev_text, int len_t, int * dev_pattern, int len_p, int * dev_location, int * dev_next, int size);

int main(int argc, char* argv[])
{
    // CPU timing
    clock_t cpu_start, cpu_end;
    float cpu_time_elapsed = 0.;
    // GPU timing
    cudaEvent_t gpu_start, gpu_end;
    float gpu_time_elapsed = 0.;

    // text size: from the command line or by default
    int N;
    if (argc > 1) {
        N = atoi(argv[1]);
    } else {
        N = DEFAULT_SIZE;
    }

    // raw text (all chars A-Z)
    char * text = (char*) calloc(N+1, sizeof(char));
    char pattern[] = "MOWZ";

    int * host_text = NULL;      // padded text, one int per character
    int * host_pattern = NULL;   // pattern converted to ints
    int * host_next = NULL;      // Sunday shift table
    int * host_location = NULL;  // host_location[i] == 1 iff a match starts at i

    initial(text, N);

    int len_t = N;
    int len_p = strlen(pattern);
    // A STRLEN substring hosts matches only in its first STRLEN-len_p+1
    // positions; consecutive substrings overlap by len_p-1 characters.
    int boundry = STRLEN-len_p+1;
    // number of substrings, then total padded length
    int size = (len_t/boundry);
    if (len_t%boundry > len_p-1) {
        size += 1;
    }
    size *= STRLEN;

    host_text = (int *)calloc(size, sizeof(int));
    host_location = (int *)calloc(size, sizeof(int));
    host_pattern = (int *)calloc(len_p, sizeof(int));
    host_next = (int *)calloc(26, sizeof(int));
    if (host_text == NULL || host_pattern == NULL || host_next == NULL || host_location == NULL) {
        printf("Allocating memroy on cpu failed!\n");
        return -1;
    }

    create_next(host_next, pattern);
    pad_text(host_text, size, text, len_t, boundry);

    // reference run on the CPU
    vector<int> location;
    cpu_start = clock();
    sunday(location, text, len_t, pattern, len_p, host_next);
    cpu_end = clock();
    cpu_time_elapsed = (float)(cpu_end-cpu_start)/CLOCKS_PER_SEC;
    printf("CPU sunday done.\n");

    // convert the pattern chars to ints for the device
    for (int i = 0; i < len_p; i++) {
        host_pattern[i] = (int)pattern[i];
    }

    // device buffers
    int * dev_text = NULL;
    int * dev_pattern = NULL;
    int * dev_next = NULL;
    int * dev_location = NULL;
    cudaError_t err;
    err = cudaMalloc((void **)&dev_text, size*sizeof(int));
    if (err != cudaSuccess) {
        printf("Allocating memroy on gpu failed!\n");
        return -1;
    }
    err = cudaMalloc((void **)&dev_pattern, len_p*sizeof(int));
    if (err != cudaSuccess) {
        printf("Allocating memroy on gpu failed!\n");
        return -1;
    }
    err = cudaMalloc((void **)&dev_next, 26*sizeof(int));
    if (err != cudaSuccess) {
        printf("Allocating memroy on gpu failed!\n");
        return -1;
    }
    err = cudaMalloc((void **)&dev_location, size*sizeof(int));
    if (err != cudaSuccess) {
        printf("Allocating memroy on gpu failed!\n");
        return -1;
    }

    // host -> device
    cudaMemcpy(dev_text, host_text, size*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_pattern, host_pattern, len_p*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_next, host_next, 26*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_location, host_location, size*sizeof(int), cudaMemcpyHostToDevice);

    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_end);
    // one thread per substring
    dim3 dimBlock(BLOCKSIZE,1,1);
    dim3 dimGrid((size/STRLEN-1)/BLOCKSIZE+1,1,1);
    cudaEventRecord(gpu_start, 0);
    sunday_kernel<<<dimGrid, dimBlock>>>(dev_text, len_t, dev_pattern, len_p, dev_location, dev_next, size);
    cudaEventRecord(gpu_end, 0);
    cudaEventSynchronize(gpu_start);
    cudaEventSynchronize(gpu_end);
    cudaEventElapsedTime(&gpu_time_elapsed,gpu_start,gpu_end);
    printf("GPU sunday done.\n");

    // device -> host
    cudaMemcpy(host_location, dev_location, size*sizeof(int), cudaMemcpyDeviceToHost);

    // report both results
    printf("\n\nresults...\nthe length of the text is %d\n", N);
    printf("sunday algorithm on cpu cost time=%f(s)\n", cpu_time_elapsed);
    printf("sunday algorithm on gpu cost time=%f(s)\n", gpu_time_elapsed/1000.);
    printf("the matches that cpu code has found:\n");
    if (0 == location.size()) {
        printf("cpu do not find the matches.\n");
    } else {
        // fix: loop index was a signed int compared against size_t
        for (size_t i = 0; i < location.size(); i++) {
            printf("cpu location:%d\n", location[i]);
        }
    }
    printf("the matches that gpu code has found:\n");
    int flag = 1;
    for (int i = 0; i < len_t; i++) {
        if (host_location[i]) {
            printf("gpu location:%d\n", i);
            flag = 0;
        }
    }
    if (flag) {
        printf("gpu do not find the matches.\n");
    }

    // destroy the events
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_end);
    // free memory on CPU
    free(text);  // bug fix: the raw text buffer was calloc'd but never freed
    free(host_text);
    free(host_pattern);
    free(host_next);
    free(host_location);
    // free memory on GPU
    cudaFree(dev_text);
    cudaFree(dev_pattern);
    cudaFree(dev_next);
    cudaFree(dev_location);
    system("pause");
    return 0;
}

/**
 * generate raw text randomly
 * @param text raw text
 * @param n the length of the text
 */
void initial(char * text, const int n)
{
    srand(unsigned(time(0)));
    for (int i = 0; i < n; i++) {
        text[i] = rand()%26+'A';
    }
    text[n] = '\0';  // terminate so strlen/printing work
    printf("Initial text done.\n");
}

/**
 * generate the Sunday shift table
 * @param next next array (26 entries, one per letter A-Z)
 * @param pattern the pattern text
 */
void create_next(int * next, const char * pattern)
{
    int len_p = strlen(pattern);
    // default shift: pattern length + 1 (character not in pattern)
    for (int i = 0; i < 26; i++) {
        next[i] = len_p + 1;
    }
    // for pattern characters: distance from last occurrence to the end + 1
    for (int i = 0; i < len_p; i++) {
        next[pattern[i]-'A'] = len_p - i;
    }
}

/**
 * Sunday algorithm on the CPU
 * @param location records positions where the pattern matches
 * @param text raw text
 * @param len_t the length of the text
 * @param pattern pattern text
 * @param len_p the length of the pattern
 * @param next shift table
 */
void sunday(vector<int> &location, const char* text, int len_t, const char* pattern, int len_p, const int * next)
{
    // current candidate position in the raw text
    int pos = 0;
    while (pos < (len_t - len_p+1)) {
        int i = pos;
        // j traces through the pattern
        int j;
        for (j = 0; j < len_p; j++, i++) {
            if (text[i] != pattern[j]) { // mismatch
                if (pos + len_p >= len_t) // window past the end: all done
                    return;
                // shift by the character just past the window
                pos += next[text[pos + len_p] - 'A'];
                break;
            }
        }
        // full match: record it and advance by one
        if (j == len_p) {
            location.push_back(pos);
            pos += 1;
        }
    }
}

/**
 * pad the raw text into fixed-length substrings
 * @param host_text padded text (ints; calloc'd, so the tail stays 0)
 * @param size the length of the padded text
 * @param text raw text
 * @param len_t the length of the raw text
 * @param boundry STRLEN - len_p + 1: stride between substring starts
 */
void pad_text(int * host_text, int size, char * text, int len_t, int boundry)
{
    int offset = 0;
    for (int i = 0; i < size; i++) {
        // each STRLEN chunk starts `boundry` characters after the previous
        if (i && i%STRLEN == 0) {
            offset += boundry;
        }
        if (offset + i%STRLEN < len_t) {
            host_text[i] = (int)text[offset + i%STRLEN];
        } else {
            break;  // remaining entries stay 0 (padding/terminator)
        }
    }
}

/**
 * Sunday algorithm on the GPU: one thread per STRLEN substring.
 * @param dev_text padded text on the GPU
 * @param len_t the length of the raw text
 * @param dev_pattern pattern on the GPU
 * @param len_p the length of the pattern
 * @param dev_location match flags, indexed by raw-text position
 * @param dev_next shift table on the GPU
 * @param size the length of the padded text
 */
__global__ void sunday_kernel(int * dev_text, int len_t, int * dev_pattern, int len_p, int * dev_location, int * dev_next, int size)
{
    // substring index handled by this thread
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // start of the substring within dev_text
    int offset = idx * STRLEN;
    int pos = 0;
    if (idx >= size/STRLEN)
        return;
    while (pos < (STRLEN-len_p+1)) {
        // current position in dev_text
        int i = pos+offset;
        // position in the pattern
        int j;
        for (j = 0; j < len_p; ++j, ++i) {
            if (dev_text[i] != dev_pattern[j]) { // mismatch
                // padded zeros mark the end of valid text in this substring
                if (0 == dev_text[pos + offset + len_p]) {
                    return;
                }
                // Sunday shift (65 == 'A')
                pos += dev_next[dev_text[pos + offset + len_p] - 65];
                break;
            }
        }
        // full match: flag the raw-text position (idx*boundry + pos)
        if (j == len_p) {
            dev_location[idx * (STRLEN-len_p+1) + pos] = 1;
            pos += 1;
        }
    }
}
21,947
#include "includes.h"
//============================================================================
// Name        : PoissonEquationJacobiCuda.cpp
// Author      :
// Version     :
// Copyright   : Your copyright notice
// Description : Hello World in C++, Ansi-style
//============================================================================
using namespace std;

const float PI = 4*atan(1);

// Jacobi iteration for the Poisson equation on a 2D grid, using a halo of
// width `noIters` so that one kernel launch can run `noIters` iterations
// entirely in shared memory before writing results back.
// Dynamic shared memory layout (3 planes of effBlockSizeX*effBlockSizeY floats):
//   [0 .. smSize)        source term (grid)
//   [smSize .. 2*smSize) previous iterate (initial guess = potential)
//   [2*smSize .. 3*smSize) current iterate being computed
// NOTE(review): `tolerance` is accepted but never used — no early exit.
__global__ void jacobiMethod(float* grid,float* potential, int sizeX,int sizeY,float scale,int noIters,float tolerance){
	extern __shared__ float sharedMem[];
	/*
	Shared memory
	1st part is grid
	2nd part is initial guess
	3rd part is current Solution
	*/
	// Copying from global to shared memory
	int threadIdX = threadIdx.x;
	int threadIdY = threadIdx.y;
	if (threadIdX == 0 && threadIdY == 0) {
		//printf("At Beginning\n");
	}
	int bOx = blockIdx.x * blockDim.x;
	int bOy = blockIdx.y * blockDim.y;
	//int totalBlockThreadId = threadIdY*blockDim.x + threadIdX;
	//int blockThreadIdx = threadIdX-noIters;
	//int blockThreadIdy = threadIdY-noIters;
	int effBlockSizeX = blockDim.x + 2 * noIters;
	int effBlockSizeY = blockDim.y + 2 * noIters;
	int totalSize = sizeX*sizeY;
	int sharedMemSize = effBlockSizeX*effBlockSizeY;
	// Cooperative load of the haloed tile. Each thread strides over the
	// (larger) effective tile.
	// NOTE(review): the linear index uses stride effBlockSizeX while `i` is
	// the x-direction loop and is paired with bOy in currElemMain — for
	// non-square blocks this looks like a transposed row/column mapping.
	// TODO confirm against the host-side launch geometry.
	for(int i= threadIdX;i<effBlockSizeX;i+= blockDim.x)
		for (int j = threadIdY; j < effBlockSizeY; j += blockDim.y) {
			int currElemSM = i*effBlockSizeX + j;
			int currElemMain = (i - noIters + bOy)*sizeX + (j - noIters + bOx);
			// NOTE(review): this 1D range check does not reject horizontal
			// wrap-around (an x out of range but a valid linear index).
			if (currElemMain >= 0 && currElemMain < totalSize) {
				sharedMem[currElemSM] = grid[currElemMain];
				sharedMem[currElemSM + sharedMemSize] = potential[currElemMain];
			}
			else {
				sharedMem[currElemSM] = 0;
				sharedMem[currElemSM + sharedMemSize] = 0;
			}
			sharedMem[currElemSM + 2 * sharedMemSize] = 0;
		}
	__syncthreads();
	if (threadIdX == 0 && threadIdY == 0) {
		//printf("Copied to shared memory\n");
	}
	// noIters Jacobi sweeps, all in shared memory. Barriers are outside the
	// strided loops, so they are reached by every thread in the block.
	for(int k=0;k<noIters;k++){
		for(int i= threadIdX;i<effBlockSizeX;i+= blockDim.x)
			for(int j= threadIdY;j<effBlockSizeY;j+= blockDim.y){
				int currPos = i*effBlockSizeX +j+ sharedMemSize*2;
				sharedMem[currPos]=0;
				// NOTE(review): neighbor offsets use effBlockSizeY while the
				// linear stride above is effBlockSizeX, and the guards are
				// i>1 / j>1 rather than i>0 / j>0 — both look inconsistent
				// and should be double-checked.
				if(i>1){
					sharedMem[currPos]+=(sharedMem[currPos- effBlockSizeY- sharedMemSize]/4);
				}
				if(i<effBlockSizeX -1){
					sharedMem[currPos]+=(sharedMem[currPos+ effBlockSizeY - sharedMemSize]/4);
				}
				if(j>1){
					sharedMem[currPos]+=(sharedMem[currPos-1- sharedMemSize]/4);
				}
				if(j<effBlockSizeY-1){
					sharedMem[currPos]+=(sharedMem[currPos+1- sharedMemSize]/4);
				}
				if(i== effBlockSizeX-1||j== effBlockSizeY-1){
					//currSolution[currPos]=0;
				}else if(currPos - 2 * sharedMemSize>=0){
					// source-term contribution: (h^2 / 4) * f
					sharedMem[currPos]+=(scale*scale/4* sharedMem[currPos-2* sharedMemSize]);
				}
			}
		__syncthreads();
		// Promote the freshly computed plane to be the "previous iterate".
		for (int i = threadIdX; i<effBlockSizeX; i += blockDim.x)
			for (int j = threadIdY; j<effBlockSizeY; j += blockDim.y) {
				int currPos = i*effBlockSizeX + j + sharedMemSize * 2;
				sharedMem[currPos- sharedMemSize]= sharedMem[currPos];
			}
		__syncthreads();
	}
	if (threadIdX == 0 && threadIdY == 0) {
		//printf("Done computation\n");
	}
	// Write the interior (non-halo) part of the tile back to global memory.
	// NOTE(review): the second bound on j uses effBlockSizeX, not
	// effBlockSizeY — likely a typo for non-square blocks. Also the lower
	// guard is currElemMain > 0, which skips element 0.
	for (int i = threadIdX; i<effBlockSizeX; i += blockDim.x)
		for (int j = threadIdY; j < effBlockSizeY; j += blockDim.y) {
			if (i >= noIters && j >= noIters && i < effBlockSizeX - noIters && j < effBlockSizeX - noIters) {
				int currElemSM = i*effBlockSizeX + j;
				int currElemMain = (i - noIters + bOy)*sizeX + (j - noIters + bOx);
				if (currElemMain > 0 && currElemMain < totalSize) {
					potential[currElemMain] = sharedMem[currElemSM + 2 * sharedMemSize];
				}
			}
		}
	if (threadIdX == 0 && threadIdY == 0) {
		//printf("Copied to memory\n");
	}
}
21,948
__global__ void vectorSwap(float *A,float *B,const int size) { int i = blockDim.x*blockIdx.x + threadIdx.x; int l = sqrt((float)size); if (i < size) { int j = i/l; int k = i%l; float temp; if((k%2)==0 && k!=l-1){ temp = A[i]; A[i] = A[i+1]; A[i+1] = temp; } __syncthreads(); if(j<k){ B[i] = A[k*l+j]; } else{ B[i] = A[i]; } } }
21,949
#include "includes.h" __global__ void computeMoment(int8_t *readArr, int8_t *writeArr, float *weightArr, int n, int tileSize){ int row_init = blockIdx.x*(blockDim.x*tileSize) + threadIdx.x*tileSize; int col_init = blockIdx.y*(blockDim.y*tileSize) + threadIdx.y*tileSize; // Assign each thread a tileSizeXtileSize tile for(int ii=0; ii<tileSize; ++ii){ for (int jj=0; jj<tileSize; ++jj){ int row = row_init + ii; int col = col_init + jj; // If coordinates are between boundaries // update the write array accordingly if(row < n && col < n){ float influence = 0.0f; for (int i=-2; i<3; i++) { for (int j=-2; j<3; j++) { //add extra n so that modulo behaves like mathematics modulo //that is return only positive values int y = (row+i+n)%n; int x = (col+j+n)%n; influence += weightArr[i*5 + j]*readArr[y*n + x]; } } writeArr[row*n + col] = readArr[row*n + col]; if (influence<-diff) writeArr[row*n + col] = -1; else if (influence>diff) writeArr[row*n + col] = 1; __syncthreads(); } } } }
21,950
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda.h> #define N 8 __global__ void reduceVector(float *a, int thread){ int id = blockIdx.x*blockDim.x+threadIdx.x; for(int s=N/2; s>=1; s/=2){ if(id<s) *(a+id) += *(a+id+s); __syncthreads(); } } int main() { int memsize = sizeof(float)*N; float *a =(float *) malloc(memsize); float *d_a; for(int i=0;i<N;++i){ *(a+i)=(float)(rand()%10); printf("%f ,", *(a+i)); } printf("\n"); cudaMalloc(&d_a, memsize); cudaMemcpy(d_a, a, memsize, cudaMemcpyHostToDevice); int thread = (int) ceilf((double)N/2); reduceVector <<<1, thread>>> (d_a, thread); cudaMemcpy(a, d_a, memsize, cudaMemcpyDeviceToHost); printf("%f ,", *a); printf("\n\n"); free(a);cudaFree(d_a); }
21,951
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>

// Wall-clock time in seconds (microsecond resolution), for host-side timing.
double dwalltime(){
	double sec;
	struct timeval tv;

	gettimeofday(&tv,NULL);
	sec = tv.tv_sec + tv.tv_usec/1000000.0;
	return sec;
}

// Computes 4x4 determinants by cofactor expansion along the first row.
// Each block handles 4 matrices (64 doubles); threads 0..3 each expand one
// matrix (offset = tid*16) and write one entry of detM.
// NOTE(review): the shared array size is sizeof(double)*64 ELEMENTS, i.e.
// 512 doubles — sizeof is already implied by the element type, so this
// over-allocates shared memory by 8x; 64 would suffice.
__global__ void matDet(double *d_matA, double *detM){
	int global_id = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
	__shared__ double s_mat[sizeof(double)*64];
	__shared__ double s_detAux[16];
	int offset = (threadIdx.y * blockDim.x + threadIdx.x)*16;
	if ((threadIdx.y * blockDim.x + threadIdx.x) < 64){
		s_mat[(threadIdx.y * blockDim.x + threadIdx.x)]=d_matA[global_id];
		if(threadIdx.y * blockDim.x + threadIdx.x < 16){
			s_detAux[threadIdx.y * blockDim.x + threadIdx.x] = 0;
		}
		// NOTE(review): this barrier sits inside `if (... < 64)`; it is only
		// safe if the block is launched with exactly 64 threads (or a
		// configuration where all threads take the same branch).
		__syncthreads();
		if(threadIdx.y * blockDim.x + threadIdx.x < 4){
			// printf("globalId:%d|%d|%d|%d|%d\n",global_id,(threadIdx.y * blockDim.x + threadIdx.x)*4,(threadIdx.y * blockDim.x + threadIdx.x)*4+1,(threadIdx.y * blockDim.x + threadIdx.x)*4+2,(threadIdx.y * blockDim.x + threadIdx.x)*4+3);
			// Cofactor 0: +a00 * det(minor00)
			s_detAux[(threadIdx.y * blockDim.x + threadIdx.x)*4] += s_mat[offset] * (
				(s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+13])+(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+14]) +
				(-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+15])) );
			// __syncthreads();
			// Cofactor 1: -a01 * det(minor01)
			s_detAux[(threadIdx.y * blockDim.x + threadIdx.x)*4+1] += (-1*s_mat[offset+1]) * (
				(s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+14]) +
				(-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+15])) );
			// __syncthreads();
			// Cofactor 2: +a02 * det(minor02)
			s_detAux[(threadIdx.y * blockDim.x + threadIdx.x)*4+2] += s_mat[offset+2] * (
				(s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+15])+(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+13]) +
				(-1*(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+15])) );
			// __syncthreads();
			// Cofactor 3: -a03 * det(minor03)
			s_detAux[(threadIdx.y * blockDim.x + threadIdx.x)*4+3] += (-1*s_mat[offset+3]) * (
				(s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+14])+(s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+12])+(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+13]) +
				(-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+14])) );
			// __syncthreads();
			// Each thread reads only the 4 partials it wrote itself, so no
			// barrier is needed before this sum.
			detM[blockIdx.x*4 + (threadIdx.y * blockDim.x + threadIdx.x)] = s_detAux[(threadIdx.y * blockDim.x + threadIdx.x)*4] + s_detAux[(threadIdx.y * blockDim.x + threadIdx.x)*4+1] + s_detAux[(threadIdx.y * blockDim.x + threadIdx.x)*4+2] + s_detAux[(threadIdx.y * blockDim.x + threadIdx.x)*4+3];
			// __syncthreads();
		}
	}
}

// Iterative pairwise reduction of d_matA (length n), halving the active
// range each pass; instrumented with device-side printf for debugging.
// NOTE(review): writes of partial sums back to d_matA across iterations are
// only ordered within a block — with multiple blocks the passes race.
__global__ void vecMult2(double *d_matA,unsigned long n,int iteraciones){
	int global_id = blockIdx.x *blockDim.x + threadIdx.x;
	__shared__ double s_mat[32];
	unsigned int i;
	/*
	for( i = 1; i <= 2; i++) {
		if(( threadIdx.y * blockDim.x + threadIdx.x )< (int)(64 >> i)){
			s_mat[(threadIdx.y * blockDim.x + threadIdx.x)] += s_mat[((threadIdx.y * blockDim.x + threadIdx.x ) + (64 >> i))];
		}
		__syncthreads();
	}
	*/
	for(i = 1 ; i <= iteraciones ; i++){
		// Load this pass's slice of the (shrinking) array.
		if ( global_id < ( n / (1 << (i - 1) ))){
			s_mat[threadIdx.x]=d_matA[global_id];
			// printf("global:%d||%.2lf||\n",global_id,s_mat[threadIdx.x]);
		}
		__syncthreads();
		// First half-warp adds the second half of the 32-element tile.
		if ( global_id < ( n / (1 << (i - 1) ))){
			if (threadIdx.x < 16){
				printf("global:%d||%.2lf||%.2lf||\n",global_id,s_mat[threadIdx.x],s_mat[threadIdx.x + 16]);
				s_mat[threadIdx.x] += s_mat[threadIdx.x + 16];
			}
		}
		__syncthreads();
		// Compact the partial sums to the front of d_matA for the next pass.
		if ( global_id < ( n / (1 << (i - 1) ))){
			if (threadIdx.x < 16){
				// printf("global:%d||%.2lf||\n",global_id,s_mat[threadIdx.x]);
				d_matA[blockIdx.x * 16 + threadIdx.x] = s_mat[threadIdx.x];
			}
		}
		__syncthreads();
		if(global_id == 0){
			printf("-------------------------------------------\n");
		}
	}
}

// Host driver: builds N 4x4 matrices, runs the reduction kernel and prints
// timing plus the first matrix for inspection.
int main(int argc, char *argv[]){
	if (argc != 2){
		printf("Falta argumento: N\n");
		return 0;
	}
	//declaracion de variables
	cudaError_t error;
	unsigned long N = atoi (argv[1]);
	unsigned long CUDA_BLK = 32,GRID_BLK;
	unsigned long numBytes = sizeof(double)*4*4;
	double *matrices,*d_matrices,*d_detM,*detM,timetick;
	unsigned long i,j;
	int iteraciones;

	matrices = (double *)malloc(numBytes*N);
	detM = (double *)malloc(sizeof(double)*N);

	for (i = 0; i < 4*4*N; i++){
		matrices[i] = 1;
	}
	for (i = 0; i < N; i++){
		detM[i] = 0;
	}
	// A few non-1 entries so the determinants are non-trivial.
	matrices[2] = 220;
	matrices[13] = 220;
	matrices[7] = 6;
	matrices[14] = 6;
	//comment
	cudaMalloc((void **) &d_matrices, numBytes*N);
	cudaMalloc((void **) &d_detM, sizeof(double)*N);

	dim3 dimBlock(CUDA_BLK);
	dim3 dimGrid(N/4);

	timetick = dwalltime();
	iteraciones = log(N) / log(2);
	cudaMemcpy(d_matrices, matrices, numBytes*N, cudaMemcpyHostToDevice); // CPU -> GPU
	cudaMemcpy(d_detM, detM, sizeof(double)*N, cudaMemcpyHostToDevice); // CPU -> GPU
	// matDet<<<dimGrid, dimBlock>>>(d_matrices,d_detM);
	// cudaThreadSynchronize();
	dim3 dimGrid2(4*4*N/CUDA_BLK );
	vecMult2<<<dimGrid2, dimBlock>>>(d_matrices,(4*4*N),iteraciones);
	cudaThreadSynchronize();
	// NOTE(review): only numBytes (one matrix) is copied back, not the full
	// numBytes*N — enough for the 4x4 print below.
	cudaMemcpy(matrices, d_matrices, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
	cudaMemcpy(detM, d_detM, sizeof(double)*N, cudaMemcpyDeviceToHost); // GPU -> CPU
	printf("Tiempo para la GPU: %f\n",dwalltime() - timetick);
	error = cudaGetLastError();
	printf("error: %d\n",error);
	printf("%.2lf|\n",detM[0]);
	for(i=0; i < 4; i++){
		for(j=0; j < 4; j++){
			printf("%.2lf|",matrices[i*4+j]);
		}
		printf("\n");
	}
	cudaFree(d_matrices);
	cudaFree(d_detM);
	free(matrices);
	free(detM);
	return 0;
}
21,952
# include <stdio.h>
# include <stdlib.h>
# include <cuda.h>
# include <sys/time.h>
# include <unistd.h>

# define BLOCK_SIZE (32)
//# define n 128
//# define n 256
//# define n 512
//# define n 1024
//# define n 2048
//# define n 4096
# define n 8192
# define threshold 1e-8

// Wall-clock time in seconds for host-side timing.
double rtclock(void) {
	struct timezone Tzp;
	struct timeval Tp;
	int stat;
	stat = gettimeofday (&Tp, &Tzp);
	if (stat != 0) printf("Error return from gettimeofday: %d",stat);
	return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}

// Compare two N x N integer matrices element-wise.
// Fixed: the original took double* and was called with int arrays cast to
// double*, which reinterprets the bytes and reports garbage diffs. It now
// takes the int arrays directly and does the diff in double.
void compare(int N, const int *wref, const int *w) {
	double maxdiff,this_diff;
	int numdiffs;
	int i,j;
	numdiffs = 0;
	maxdiff = 0;
	for(i=0;i<N;i++)
		for(j=0;j<N;j++) {
			this_diff = (double)wref[i*N+j]-(double)w[i*N+j];
			if(this_diff < 0) this_diff = -1.0*this_diff;
			if(this_diff>threshold) {
				numdiffs++;
				if(this_diff > maxdiff) maxdiff=this_diff;
			}
		}
	if(numdiffs > 0)
		printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs, threshold, maxdiff);
	else
		printf("No differences found between reference and test versions\n");
}

// CPU reference: C = A * B (row-major, n x n).
int *mat_mul_ord(int *A, int *B, int *C) {
	for(int i = 0; i < n; i++)
		for(int j = 0; j < n; j++) {
			int sum = 0;
			for(int k = 0; k < n; k++)
				sum += A[i*n+k] * B[k*n+j];
			C[i*n+j] = sum;
		}
	return C;
}

// Naive GPU kernel: one thread per output element, C = A * B.
// Fixed: the original read B[y*n+k], which computes A * B^T, not A * B.
// (The all-constant test data masked this — A=2, B=1 gives the same sum
// either way.) Correct row-major access is B[k*n+y].
__global__ void mat_mul_dev(int *A, int *B, int *C) {
	int x=threadIdx.y+blockIdx.y*blockDim.y;   // row
	int y=threadIdx.x+blockIdx.x*blockDim.x;   // column
	int sum=0;
	if((x<n)&&(y<n))
	{
		for (int k=0;k<n;k++)
			sum += A[x*n+k]*B[k*n+y];
		C[x*n+y]=sum;
	}
}

// Tiled GPU kernel: C = A * B using BLOCK_SIZE x BLOCK_SIZE shared-memory
// tiles. Requires n to be a multiple of BLOCK_SIZE (true for all the #define
// options above).
__global__ void matrixMul(int *A, int *B, int *C)
{
	// Declaration of the shared memory arrays As and Bs used to store the sub-matrices of A and B respectively
	__shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];

	int w = BLOCK_SIZE;
	// Block Index
	int bx = blockIdx.x;
	int by = blockIdx.y;
	// Thread Index
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	// Row 'row' and Column 'col' of matrix A or B
	int col = bx*w + tx;
	int row = by*w + ty;
	// Cv is used to store the element of the block sub-matrix that is computed by the thread
	int Cv = 0;

	// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
	for(int k = 0; k < n/w; k++)
	{
		// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
		As[ty][tx] = A[row*n + (k*w + tx)];
		Bs[ty][tx] = B[(k*w + ty)*n + col];
		// Synchronize to make sure the matrices are loaded
		__syncthreads();
		// Multiply the two matrices together; each thread computes one element of the block sub-matrix
		for(int l = 0; l < w; l++)
			Cv += As[ty][l] * Bs[l][tx];
		// Ensure all threads are done with the tile before it is overwritten.
		__syncthreads();
	}
	// Write the block sub-matrix to device memory; each thread writes one element
	C[row*n + col] = Cv;
}

int main()
{
	int *A, *B, *C, *Cref1, *Cref2;
	int *A_d, *B_d, *C_d;
	int i, j;
	double clkbegin, clkend, t;

	// Fixed: allocations used sizeof(int*) instead of sizeof(int) — harmless
	// over-allocation on 64-bit, but wrong.
	A = (int *) malloc(n*n*sizeof(int));
	B = (int *) malloc(n*n*sizeof(int));
	C = (int *) malloc(n*n*sizeof(int));
	Cref1 = (int *) malloc(n*n*sizeof(int));
	Cref2 = (int *) malloc(n*n*sizeof(int));
	int size = n*n*sizeof(int);

	// Initialise the input data on the CPU
	for(i = 0; i < n; i++)
		for(j = 0; j < n; j++) {
			A[i*n+j] = 2;//i+j;
			B[i*n+j] = 1;//2+i+j;
		}

	clkbegin = rtclock();
	C = mat_mul_ord(A, B, C);
	clkend = rtclock();
	t = clkend-clkbegin;
	// Fixed: this run is the CPU reference, not GPU.
	printf("CPU: Approx GFLOPS: %.1f ; Time = %f sec ; c[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, C[((n/2)*n)+n/2-1]);

	// Create corresponding int arrays on the GPU
	cudaMalloc((void**)&A_d, size);
	cudaMalloc((void**)&B_d, size);
	cudaMalloc((void**)&C_d, size);

	// Copy input data to array on GPU
	cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);
	cudaMemcpy(B_d, B, size, cudaMemcpyHostToDevice);

	// Set the grid and block sizes to launch kernel
	dim3 block(BLOCK_SIZE, BLOCK_SIZE);
	dim3 grid(n/BLOCK_SIZE, n/BLOCK_SIZE);

	clkbegin = rtclock();
	mat_mul_dev<<<grid, block>>>(A_d, B_d, C_d);
	// Fixed: kernel launches are asynchronous — without a sync the timer
	// only measured launch overhead.
	cudaDeviceSynchronize();
	clkend = rtclock();
	t = clkend-clkbegin;
	cudaMemcpy(Cref1, C_d, size, cudaMemcpyDeviceToHost);
	printf("GPU: Approx GFLOPS: %.1f ; Time = %f sec ; c[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, Cref1[((n/2)*n)+n/2-1]);

	clkbegin = rtclock();
	matrixMul<<<grid, block>>>(A_d, B_d, C_d);
	cudaDeviceSynchronize();
	clkend = rtclock();
	t = clkend-clkbegin;

	// Copy output array from GPU back to CPU
	cudaMemcpy(Cref2, C_d, size, cudaMemcpyDeviceToHost);

	// Free up the arrays on the GPU
	cudaFree(A_d);
	cudaFree(B_d);
	cudaFree(C_d);

	/*
	for(i = 0; i < n; i++) {
		for(j = 0; j < n; j++)
			printf("%d ", C[i*n+j]);
		printf("\n");
	}
	*/
	printf("GPU: Approx GFLOPS: %.1f ; Time = %f sec ; C[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, Cref2[((n/2)*n)+n/2-1]);

	// Fixed: compare now receives the int arrays directly (no bogus double* cast).
	compare(n, C, Cref1);
	compare(n, C, Cref2);

	return 0;
}
21,953
#include <stdio.h> // For use of the printf function #define N 256 // Number of threads to use #define TPB 256 // Threads PER block /** * Function launched from the CPU and run on the GPU that will display a message * of the format `Hello World! My threadId is x` where x is the the threadId of * the thread found by thread indexing. */ __global__ void helloWorldKernel() { // Calculate a unique ID for the currently running thread by using its // index within a block plus an offset of number of threads before the // current block (blockIdx.x * blockDim.x) int threadId = blockIdx.x * blockDim.x + threadIdx.x; // Print a message of format 'Hello World! My threadId is x' where x is // thread_id printf("Hello World! My threadId is %d\n", threadId); } // Entry point into the program, initiate kernel launch int main() { // Run the helloWorldKernel function on device using N / TPB thread // blocks of TPB threads PER block helloWorldKernel <<<(N / TPB), TPB>>> (); // CRUCIAL! Make execution of the kernel synchronous so that the CPU waits // for all threads in the grid to complete before finishing the program. // Without this you will not see the result of calls to printf in the kernel // function due to the default asynchronous nature of kernel launches cudaDeviceSynchronize(); return 0; }
21,954
#include "includes.h" __global__ void matrixMul(int* A, int* B, int* C, int aF, int aC, int bF, int bC, int cF, int cC) { // Compute each thread's global row and column index int row = (blockIdx.y * blockDim.y) + threadIdx.y; int col = (blockIdx.x * blockDim.x) + threadIdx.x; // Iterate over row, and down column ////c[row * N + col] = 0; if (aC != bF) return; if ((row < aF) && (col < bC)) { for (int k = 0; k < aC; ++k) { // Accumulate results for a single element C[row * cC + col] += A[row * aC + k] * B[k * bC + col]; } } //C[row * aF + col] = 0; }
21,955
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <time.h>

#define N (2048*2048)
#define THREADS_PER_BLOCK 512

//
// kernel routine
//
// Dot product: each block computes the partial product of its slice in
// shared memory; thread 0 of each block sums the slice and atomically adds
// it to the global accumulator *c. Requires N to be a multiple of
// THREADS_PER_BLOCK (true here) — there is no tail guard on idx.
__global__ void dot_product(const int *a, const int *b, int *c)
{
	// each thread in a block sharing the memory, temp
	__shared__ int temp[THREADS_PER_BLOCK];
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	temp[threadIdx.x] = a[idx] * b[idx];

	// all threads must have written temp before thread 0 reads it
	__syncthreads();

	if (0 == threadIdx.x) {
		int sum = 0;
		/* iterate over only threads in the block */
		for (int i=0; i<THREADS_PER_BLOCK; ++i)
			sum += temp[i];
		/* integer atomicAdd on global memory (available since sm_11) */
		atomicAdd( c, sum );
	}
}

//
// main code
//
int main(int argc, char **argv)
{
	int *a, *b, *c;
	int *dev_a, *dev_b, *dev_c;
	int size = N * sizeof(int);
	int result = 0;
	time_t t;

	// initialise card - legacy code
	//cutilDeviceInit(argc, argv);

	srand((unsigned) time(&t));
	printf("DEBUG: Size of 'int' type: %lu\n", sizeof(int));
	printf("DEBUG: Total footprint size: %d bytes\n", size);

	// allocate device copies of a, b, c
	cudaMalloc( (void**)&dev_a, size );
	cudaMalloc( (void**)&dev_b, size );
	cudaMalloc( (void**)&dev_c, sizeof(int) );

	a = (int*)malloc( size );
	b = (int*)malloc( size );
	c = (int*)malloc( sizeof(int) );

	for (int i=0; i<N; i++) {
#if 0
		a[i] = rand()%N;
		b[i] = rand()%N;
#else
		a[i] = 5;
		b[i] = 5;
#endif
	}
	printf("DEBUG: a[%d]=%d, b[%d]=%d\n",0, a[0], 0, b[0]);
	printf("DEBUG: a[%d]=%d, b[%d]=%d\n",1, a[1], 1, b[1]);

	// copy inputs to device
	cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
	cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );

	// Fixed: the kernel accumulates into *dev_c with atomicAdd, so the
	// accumulator must start at zero. The original copied the UNINITIALIZED
	// malloc'd *c to the device, making the result garbage-dependent.
	*c = 0;
	cudaMemcpy( dev_c, c, sizeof(int), cudaMemcpyHostToDevice );

	int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;

	// launch dot_product() kernel with N parallel blocks
	printf("INFO: Launching CUDA kernel: dot product with blocks=%d, threads=%d...", blocksPerGrid, THREADS_PER_BLOCK);
	dot_product<<< blocksPerGrid, THREADS_PER_BLOCK >>>( dev_a, dev_b, dev_c );
	//dot_product<<< N/THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( dev_a, dev_b, dev_c );
	printf(" Done\n");
	printf("DEBUG: c2 is: %d @ %p\n", *c, &c);

	// copy device result back to host copy of c
	// (this blocking memcpy also synchronizes with the kernel above)
	cudaMemcpy( c, dev_c, sizeof(int), cudaMemcpyDeviceToHost );
	printf("DEBUG: c3 is: %d @ %p\n", *c, &c);

#if 1
	//result = 0;
	// CPU reference sum for validation
	for (int i=0; i<N; i++) {
		result += a[i] * b[i];
	}
	if (fabs(result - *c) < 1e-5)
		printf("INFO: PASS\n");
	else
		printf("ERROR: *** FAILED *** sum=%d\n", result);
#endif

#if 1
	printf("DEBUG: a[0]=%d, b[0]=%d\n", a[0], b[0]);
	printf("DEBUG: a[%d]=%d, b[%d]=%d, c=%d\n", 1, a[1], 1, b[1], *c);
#endif

	cudaFree( dev_a );
	cudaFree( dev_b );
	cudaFree( dev_c );
	free( a );
	free( b );
	free( c );

	cudaDeviceReset();
	return 0;
}
21,956
#include<time.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>

/* Macro for mapping three dimensional index (ix,iy,iz) to
 * linear index. The vertical index (z) is running fastest so
 * that vertical columns are always kept together in memory.
 */
#define LINIDX(n, ix,iy,iz) ((n.z)*(n.y)*(ix) + (n.z)*(iy) + (iz))
/* Same mapping, for use with a pointer-to-struct (device side). */
#define GLINIDX(n, ix,iy,iz) ((n->z)*(n->y)*(ix) + (n->z)*(iy) + (iz))
#define BLOCK_SIZE 5

/* Structure for three dimensional grid size */
struct N {
	int x; // Number of grid points in x-direction
	int y; // Number of grid points in y-direction
	int z; // Number of grid points in z-direction
};

/* Number of gridpoints */
struct N n;

/* Relative residual reduction target */
//const float resreduction = 1.0e-5;

#define real float

/* parameters of PDE */
const real lambda2 = 1e4;
const real omega2 = 1.0;
const real delta = 0.0;

/* CPU reference: y = A*x for the 7-point finite-difference operator with
 * homogeneous Dirichlet boundary conditions (off-grid neighbors contribute
 * zero, enforced by the index guards below). */
void apply(const struct N n, const real* x, real* y) {
	int ix, iy, iz;
	// grid spacings in all directions
	real hx = 1./n.x;
	real hy = 1./n.y;
	real hz = 1./n.z;
	real hx_inv2 = 1./(hx*hx);
	real hy_inv2 = 1./(hy*hy);
	real hz_inv2 = 1./(hz*hz);
	//int i;
	// for(i=0;i<n.x*n.y*n.z;i++)
	// y[i]=0;
	for (ix = 0; ix<n.x; ix++) {
		for (iy = 0; iy<n.y; iy++) {
			for (iz = 0; iz<n.z; iz++) {
				// Diagonal element
				y[LINIDX(n, ix,iy,iz)]=(delta+2.0*omega2 * (hx_inv2 + hy_inv2 + lambda2*hz_inv2))* x[LINIDX(n, ix,iy,iz)];
				// Off diagonal elements, enforce homogenous Dirichlet
				// boundary conditions
				if (ix>0)
					y[LINIDX(n, ix,iy,iz)]+= x[LINIDX(n, ix-1,iy,iz)]* (-omega2*hx_inv2);
				if (ix<n.x-1)
					y[LINIDX(n, ix,iy,iz)]+= x[LINIDX(n, ix+1,iy,iz)]* (-omega2*hx_inv2);
				if (iy>0)
					y[LINIDX(n, ix,iy,iz)]+= x[LINIDX(n, ix,iy-1,iz)]* (-omega2*hy_inv2);
				if (iy<n.y-1)
					y[LINIDX(n, ix,iy,iz)]+= x[LINIDX(n, ix,iy+1,iz)]* (-omega2*hy_inv2);
				if (iz>0)
					y[LINIDX(n, ix,iy,iz)]+= x[LINIDX(n, ix,iy,iz-1)]* (-omega2*lambda2*hz_inv2);
				if (iz<n.z-1)
					y[LINIDX(n, ix,iy,iz)]+= x[LINIDX(n, ix,iy,iz+1)]* (-omega2*lambda2*hz_inv2);
			}
		}
	}
}

/* GPU version of apply(): one thread per grid point.
 * NOTE(review): there is NO upper bounds guard on ix/iy/iz, so this kernel
 * writes out of bounds whenever a grid dimension is not a multiple of
 * BLOCK_SIZE (the ceil-divided launch below pads the grid). Safe only for
 * the n=10, BLOCK_SIZE=5 configuration used in main(). */
__global__ void gpu_apply(const N *n, const real *x, real *y){//(const N *n, const float* x, float* y){
	int ix, iy, iz;
	//grid spacings in all directions
	real hx = 1./n->x;
	real hy = 1./n->y;
	real hz = 1./n->z;
	real hx_inv2 = 1./(hx*hx);
	real hy_inv2 = 1./(hy*hy);
	real hz_inv2 = 1./(hz*hz);
	ix=blockIdx.x*BLOCK_SIZE+threadIdx.x;
	iy=blockIdx.y*BLOCK_SIZE+threadIdx.y;
	iz=blockIdx.z*BLOCK_SIZE+threadIdx.z;
	// Diagonal element
	y[GLINIDX(n, ix,iy,iz)]=(delta+2.0*omega2 * (hx_inv2 + hy_inv2 + lambda2*hz_inv2))* x[GLINIDX(n, ix,iy,iz)];
	// Off diagonal elements, enforce homogenous Dirichlet
	// boundary conditions
	// NOTE(review): each thread only ever reads x and writes its own y
	// element, so this barrier is not strictly required for correctness.
	__syncthreads();
	if (ix>0)
		y[GLINIDX(n, ix,iy,iz)]+= x[GLINIDX(n, ix-1,iy,iz)]* (-omega2*hx_inv2);
	if (ix<n->x-1)
		y[GLINIDX(n, ix,iy,iz)]+= x[GLINIDX(n, ix+1,iy,iz)]* (-omega2*hx_inv2);
	if (iy>0)
		y[GLINIDX(n, ix,iy,iz)]+= x[GLINIDX(n, ix,iy-1,iz)]* (-omega2*hy_inv2);
	if (iy<n->y-1)
		y[GLINIDX(n, ix,iy,iz)]+= x[GLINIDX(n, ix,iy+1,iz)]* (-omega2*hy_inv2);
	if (iz>0)
		y[GLINIDX(n, ix,iy,iz)]+= x[GLINIDX(n, ix,iy,iz-1)]* (-omega2*lambda2*hz_inv2);
	if (iz<n->z-1)
		y[GLINIDX(n, ix,iy,iz)]+= x[GLINIDX(n, ix,iy,iz+1)]* (-omega2*lambda2*hz_inv2);
}

/* Host driver: runs both CPU and GPU operators on the same input and
 * compares the results element-wise. */
int main(){
	n.x = 10;
	n.y = 10;
	n.z = 10;
	int i;
	//clock_t start1, end1;//start2,end2;
	printf(" parameters\n");
	printf(" ==========\n");
	printf(" nx = %10d\n",n.x);
	printf(" ny = %10d\n",n.y);
	printf(" nz = %10d\n",n.z);
	printf(" omega2 = %12.6e\n",omega2);
	printf(" lambda2 = %12.6e\n",lambda2);
	printf(" delta = %12.6e\n",delta);
	int len=n.x*n.y*n.z;
	//cudaMallocHost
	real *x,*y,*gpu_y;
	cudaMallocHost((void**)&x,len*sizeof(real));
	cudaMallocHost((void**)&y,len*sizeof(real));
	cudaMallocHost((void**)&gpu_y,len*sizeof(real));
	//float x[len],y[len],gpu_y[len];
	for(i=0;i<len;i++){
		x[i]=i;
	}
	apply(n,x,y);
	N *h_n,*dev_n;
	h_n=(struct N*)malloc(sizeof(N));
	h_n->x=n.x;
	h_n->y=n.y;
	h_n->z=n.z;
	real *dev_x, *dev_y;
	//start2=clock();
	cudaMalloc((void**)&dev_n,sizeof(N));
	cudaMalloc((void**)&dev_x,len*sizeof(real));
	cudaMalloc((void**)&dev_y,len*sizeof(real));
	int dx,dy,dz;
	// Ceil-divide so the grid covers all points even when a dimension is
	// not a multiple of BLOCK_SIZE (but see the kernel's missing guard).
	dx=(int)ceil((double)n.x/BLOCK_SIZE);
	dy=(int)ceil((double)n.y/BLOCK_SIZE);
	dz=(int)ceil((double)n.z/BLOCK_SIZE);
	cudaMemcpy(dev_n,h_n,sizeof(N),cudaMemcpyHostToDevice);
	cudaMemcpy(dev_x,x,len*sizeof(real),cudaMemcpyHostToDevice);
	dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,BLOCK_SIZE);
	dim3 dimGrid(dx,dy,dz);//(n.x,n.y,n.z);
	gpu_apply<<<dimGrid,dimBlock>>>(dev_n,dev_x,dev_y);
	cudaMemcpy(gpu_y,dev_y,len*sizeof(float),cudaMemcpyDeviceToHost);
	int k=0;
	double er=0;
	// NOTE(review): the condition below counts elements whose CPU/GPU
	// difference is SMALL (< 1e-12), yet the result is printed as
	// "error number" — the label and the predicate appear inverted.
	for(i=0;i<len;i++){
		//printf("%f\n",gpu_y[i]);
		if(abs(gpu_y[i]-y[i])<1e-12){
			k++;
			er+=abs(gpu_y[i]-y[i]);
			printf("%f----%f(%d)\n",gpu_y[i],y[i],i);
		}
	}
	printf(" error number= %d(%f)\n",k,er/k);
	printf("x=%d,y=%d,z=%d\n",dx,dy,dz);
	cudaFree(dev_n);
	cudaFree(dev_x);
	cudaFree(dev_y);
	return(0);
}
21,957
// testing gpu queue (compacted array) #include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/copy.h> #include <stdio.h> #define WARP_SIZE 32 #define NUM_WARPS 16 // blocksize: threads should be less than 1024. #define BLOCK_SIZE (WARP_SIZE * NUM_WARPS) // also blocks should be arranged in 2D (would be 32 bit at 64k x 64k). preferrably in a shape that's closest to square. namespace nscale { namespace gpu { // does not touch other warps template<typename T> inline __device__ void warp_mark(const T* s_in_data, int* s_mark, const int tid) { s_mark[tid] = (s_in_data[tid] > 0 ? 1 : 0); } // s_mark and s_scan pointers point to the starting pos of current warp's data // idx is the id within the warp. // first WARP_SIZE in s_scan is dummy so to avoid warp divergence in warp_scan. // second WARP_SIZE in s_scan is the scan area. // does not touch other warps inline __device__ void init_warp_scan(int* s_mark, int* s_scan, const int idx, const int tid) { s_scan[idx] = 0; // extra padding for the scan part... s_scan[idx + WARP_SIZE] = s_mark[tid]; } // adapted from CudPP. inclusive scan. // does not touch other warps template<int maxlevel> inline __device__ int warp_scan(int* s_scan, const int idx) { int t = s_scan[idx]; if (0 <= maxlevel) { s_scan[idx] = t = t + s_scan[idx - 1]; } if (1 <= maxlevel) { s_scan[idx] = t = t + s_scan[idx - 2]; } if (2 <= maxlevel) { s_scan[idx] = t = t + s_scan[idx - 4]; } if (3 <= maxlevel) { s_scan[idx] = t = t + s_scan[idx - 8]; } if (4 <= maxlevel) { s_scan[idx] = t = t + s_scan[idx -16]; } return s_scan[WARP_SIZE - 1]; // return the total } // s_out_data points to the beginning of the shared array. // s_scan points to the scanned position for this warp // s_scan should be exclusive // return total selected for the warp // touches other warps, but can rely on warp execution ordering. 
// Scatter this warp's selected elements into s_out_data at their scanned
// positions plus the warp's block-level offset.  Contains debug printf()s.
template<typename T>
inline __device__ void warp_select(const T* s_in_data, const int* s_mark, const int* s_scan, T* s_out_data, const int offset, const int idx, const int tid, const int warpId) {
	if (warpId == 1) {
		printf("%d %d scan %d, mark %d\n", warpId, idx, s_scan[idx-1], s_mark[tid]);
	}
	if (s_mark[tid] > 0) {
		if (idx == 1) printf("%d scan position %d, offset %d\n", tid, s_scan[idx-1], offset);
		// s_scan[idx-1] turns the inclusive scan into an exclusive position.
		s_out_data[s_scan[idx-1] + offset] = s_in_data[tid];
	}
}

// One-pass compaction: each block compacts its tile into shared memory,
// reserves a slot range in the global queue with one atomicAdd, and copies
// its survivors out.  Queue order across blocks is therefore unordered.
template<typename T>
__global__ void unordered_select(const T* in_data, const int dataSize, T* out_data, unsigned int* queue_size) {
	// initialize the variables
	// 2D grid flattened; x is this thread's global element index.
	const int x = threadIdx.x + blockDim.x * (blockIdx.y + blockIdx.x * gridDim.y);
	// drop whole warps past the warp-rounded end of the data
	if (x >= dataSize - (dataSize & (WARP_SIZE - 1)) + WARP_SIZE) return;
	const int idx = threadIdx.x & (WARP_SIZE - 1);
	const int warpId = threadIdx.x >> 5;
	__shared__ int offsets[WARP_SIZE + 1]; // avoid divergence - everyone write... only using NUM_WARPS
	__shared__ int s_mark[BLOCK_SIZE];
	__shared__ int s_scan[NUM_WARPS][WARP_SIZE * 2 + 1];
	__shared__ int s_block_scan[WARP_SIZE * 2]; // warp size is 32, block size is 1024, so at most we have 32 warps. so top scan would require 1 warp.
	__shared__ T s_in_data[BLOCK_SIZE];
	__shared__ T s_out_data[BLOCK_SIZE];
	__shared__ int curr_pos[1];
	// copy in data
	if (warpId == 0) {
		offsets[idx] = 0;
		offsets[WARP_SIZE] = 0;
	}
	__syncthreads();
	s_out_data[threadIdx.x] = 0;
	s_in_data[threadIdx.x] = 0;
	if (x < dataSize) s_in_data[threadIdx.x] = in_data[x];
	// compact within this block
	warp_mark(s_in_data, s_mark, threadIdx.x); // mark the data to be processed
	init_warp_scan(s_mark, s_scan[warpId], idx, threadIdx.x);
	offsets[warpId + 1] = warp_scan<5>(s_scan[warpId] + WARP_SIZE, idx); // perform the in warp scan.
	// now scan the warp offsets - want exclusive scan hence the idx+1.
	// note that this is done by 1 warp only, so need thread sync before and after.
	__syncthreads();
	if (warpId == 0) {
		init_warp_scan(offsets + 1, s_block_scan, idx, idx);
		warp_scan<5>(s_block_scan + WARP_SIZE, idx);
		offsets[idx + 1] = s_block_scan[idx + WARP_SIZE];
		printf("222 warpId %d offsets: %d, blockscan %d %d\n", idx, offsets[idx+1], s_block_scan[idx], s_block_scan[idx + WARP_SIZE]);
	}
	__syncthreads();
	warp_select(s_in_data, s_mark, s_scan[warpId] + WARP_SIZE, s_out_data, offsets[warpId], idx, threadIdx.x, warpId); // compact the data into the block space.
	// copy the data back out. this block will get a place to write using
	// atomic add. resulting queue has the blocks shuffled.
	int block_len = offsets[WARP_SIZE]; // total selected in this block
	int curr_p = 0;
	if (block_len > 0) {
		if (threadIdx.x == 0) {
			curr_pos[0] = atomicAdd(queue_size, block_len); // only done by first thread in the block
			printf("before block %d %d curr pos %d, block len %d \n ", blockIdx.x, blockIdx.y, curr_pos[0], block_len);
		}
		// NOTE(review): curr_pos[0] is read here by all threads with no
		// __syncthreads() after thread 0 wrote it — looks like a race; confirm.
		curr_p = curr_pos[0]; // move from a single shared memory location to threads' registers
		if (threadIdx.x == 10) printf("after block %d %d curr pos %d, block len %d \n ", blockIdx.x, blockIdx.y, curr_p, block_len);
		if (threadIdx.x < block_len)
			out_data[curr_p + threadIdx.x] = s_out_data[threadIdx.x]; // dont need to worry about dataSize. queue size is smaller...
	}
	// NOTE(review): this trailing scatter writes the block-local buffer over
	// the same out_data used as the queue above, overwriting compacted
	// results — looks like leftover debug/testing code; confirm intent.
	if (x < dataSize) out_data[x] = s_out_data[threadIdx.x];
}

// Zero out_data[0..dataSize); extra warp-tail threads exit early.
template<typename T>
__global__ void clear(T* out_data, const int dataSize) {
	const int x = threadIdx.x + blockDim.x * (blockIdx.y + blockIdx.x * gridDim.y);
	if (x >= dataSize - (dataSize & (WARP_SIZE - 1)) + WARP_SIZE) return;
	if (x < dataSize) out_data[x] = 0;
}

// use after the gapped compact. (global sync for all blocks.)
// step 2 of the compacting. assumes that within each block the values have
// already been compacted, AND that block_pos has already been exclusively
// scanned to give each block's final starting position.
template<typename T>
__global__ void compact(const T* in_data, const int dataSize, const int* block_pos, T* out_data, unsigned int* queue_size) {
	const int x = threadIdx.x + blockDim.x * (blockIdx.y + blockIdx.x * gridDim.y);
	if (x >= dataSize - (dataSize & (WARP_SIZE - 1)) + WARP_SIZE) return;
	const int pos = block_pos[(blockIdx.y + blockIdx.x * gridDim.y)];
	const int len = block_pos[(blockIdx.y + blockIdx.x * gridDim.y) + 1] - pos;
	if (threadIdx.x < len) out_data[pos + threadIdx.x] = in_data[x];
	// do a global reduction to get the queue size.
	if (threadIdx.x == 0) atomicAdd(queue_size, len);
}

// Step 1: per-block compaction leaving gaps between blocks; block_pos[bid+1]
// receives this block's selected count (block_pos[0] is left untouched).
// A follow-up scan of block_pos plus compact() removes the gaps.
template<typename T>
__global__ void gapped_select(const T* in_data, const int dataSize, T* out_data, int* block_pos) {
	const int x = threadIdx.x + blockDim.x * (blockIdx.y + blockIdx.x * gridDim.y);
	if (x >= dataSize - (dataSize & (WARP_SIZE - 1)) + WARP_SIZE) return;
	const int idx = threadIdx.x & (WARP_SIZE - 1);
	const int warpId = threadIdx.x >> 5;
	__shared__ int offsets[WARP_SIZE + 1]; // avoid divergence - everyone write... only using NUM_WARPS
	__shared__ int s_mark[BLOCK_SIZE];
	__shared__ int s_scan[NUM_WARPS][WARP_SIZE * 2 + 1];
	__shared__ int s_block_scan[WARP_SIZE * 2]; // warp size is 32, block size is 1024, so at most we have 32 warps. so top scan would require 1 warp.
	__shared__ T s_in_data[BLOCK_SIZE];
	__shared__ T s_out_data[BLOCK_SIZE];
	// copy in data
	if (warpId == 0) {
		offsets[idx] = 0;
		offsets[WARP_SIZE] = 0;
	}
	__syncthreads();
	s_out_data[threadIdx.x] = 0;
	s_in_data[threadIdx.x] = 0;
	if (x < dataSize) s_in_data[threadIdx.x] = in_data[x];
	// scan the warps
	warp_mark(s_in_data, s_mark, threadIdx.x); // mark the data to be processed
	init_warp_scan(s_mark, s_scan[warpId], idx, threadIdx.x);
	offsets[warpId + 1] = warp_scan<5>(s_scan[warpId] + WARP_SIZE, idx); // perform the in warp scan.
	// now scan the warp offsets - want exclusive scan hence the idx+1.
	// note that this is done by 1 warp only, so need thread sync before and after.
	__syncthreads();
	if (warpId == 0) {
		init_warp_scan(offsets + 1, s_block_scan, idx, idx);
		warp_scan<5>(s_block_scan + WARP_SIZE, idx);
		offsets[idx + 1] = s_block_scan[idx + WARP_SIZE];
		block_pos[(blockIdx.y + blockIdx.x * gridDim.y) + 1] = offsets[WARP_SIZE];
	}
	__syncthreads();
	// now do the per warp select
	// NOTE(review): this passes base + WARP_SIZE - 1 where unordered_select
	// passes base + WARP_SIZE; combined with the idx-1 inside warp_select one
	// of the two looks off by one — verify against a reference output.
	warp_select(s_in_data, s_mark, s_scan[warpId] + WARP_SIZE - 1, s_out_data, offsets[warpId], idx, threadIdx.x, warpId); // compact the data into the block space.
	// copy the data back out, leaving the space between blocks.
	if (x < dataSize) out_data[x] = s_out_data[threadIdx.x];
}

// fermi can have maximum of 65K blocks in one dim.
// 1024 threads - warpscan all, then 1 warp to scan, then everyone add.
// Block-wide inclusive scan helper; s_scan is an array of per-warp scan
// buffers, s_scan2 the second-level buffer, block_total receives each
// block's sum (s_totals has size of 32).
inline __device__ void scan1024(const int* in_data, const int dataSize, int* out_data, int** s_scan, int* s_scan2, int* block_total) {
	const int x = threadIdx.x + blockDim.x * (blockIdx.y + blockIdx.x * gridDim.y);
	if (x >= dataSize - (dataSize & (WARP_SIZE - 1)) + WARP_SIZE) return;
	const int idx = threadIdx.x & (WARP_SIZE - 1);
	const int warpId = threadIdx.x >> 5;
	// initialize data:
	if (threadIdx.x < WARP_SIZE) {
		s_scan2[idx] = 0;
		s_scan2[idx + WARP_SIZE] = 0;
	}
	s_scan[warpId][idx] = 0;
	s_scan[warpId][idx + WARP_SIZE] = 0;
	if (x < dataSize) s_scan[warpId][idx + WARP_SIZE] = in_data[x];
	// do the scan
	s_scan2[warpId+WARP_SIZE] = warp_scan<5>(s_scan[warpId] + WARP_SIZE, idx);
	__syncthreads();
	// do the second pass - only the first warp.
	if (threadIdx.x < WARP_SIZE)
		block_total[(blockIdx.y + blockIdx.x * gridDim.y)] = warp_scan<5>(s_scan2 + WARP_SIZE, idx); // inclusive scan
	__syncthreads();
	// now add back to the warps
	if (x < dataSize) out_data[x] = s_scan[warpId][idx+WARP_SIZE] + s_scan2[warpId + WARP_SIZE - 1];
}

// to scan a large amount of data, do it in multilevel way.... allocate the
// summary array, scan the blocks with a kernel call, then scan the summary
// array. recurse,
// then add the results to previous level summary.

// CPU reference implementation of the select: copy every positive element
// forward, return the number kept.
// connectivity: need to have border of 0, and should be continuous.
template <typename T>
unsigned int SelectCPUTesting(const T* in_data, const int size, T* out_data) {
	// cpu
	unsigned int newId = 0;
	for (int i = 0; i < size; i++) {
		if (in_data[i] > 0) {
			out_data[newId] = in_data[i];
			++newId;
		}
	}
	return newId;
}

// Predicate functor: true when the argument exceeds the stored threshold k.
template <typename T>
struct GreaterThanConst : public thrust::unary_function<T,bool> {
	const T k;
	__host__ __device__ GreaterThanConst(T _k) : k(_k) {}
	__host__ __device__ bool operator()(T x) { return x > k; }
};

// Thrust-based reference: copy_if on the device, return the selected count.
// connectivity: need to have border of 0, and should be continuous.
template <typename T>
unsigned int SelectThrustScanTesting(const T* in_data, const int size, T* out_data, cudaStream_t stream) {
	// get data to GPU
	T *d_in_data, *d_out_data;
	cudaMalloc(&d_in_data, sizeof(T) * size);
	cudaMalloc(&d_out_data, sizeof(T) * size);
	cudaMemcpy(d_in_data, in_data, sizeof(T) * size, cudaMemcpyHostToDevice);
	cudaMemset(d_out_data, 0, sizeof(T) * size);
	// thrust
	thrust::device_ptr<T> queueBegin(d_in_data);
	thrust::device_ptr<T> queueEnd(d_in_data + size);
	// can change into transform_iterator to use in the copy operation. the
	// only challenge is don't know queue size, and would still need to
	// compact later...
	thrust::device_ptr<T> queueBegin2(d_out_data);
	thrust::device_ptr<T> queueEnd2 = thrust::copy_if(queueBegin, queueEnd, queueBegin2, GreaterThanConst<T>(0));
	unsigned int queueSize = queueEnd2.get() - queueBegin2.get();
	cudaMemcpy(out_data, d_out_data, sizeof(T) * size, cudaMemcpyDeviceToHost);
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); }
	// NOTE(review): cudaThreadSynchronize is deprecated — cudaDeviceSynchronize
	// is the modern equivalent (same behavior).
	cudaThreadSynchronize();
	cudaFree(d_in_data);
	cudaFree(d_out_data);
	return queueSize;
}

// warp-scan, single-kernel unordered variant: launches unordered_select and
// returns the device-accumulated queue size.  Grid is near-square 2D.
// connectivity: need to have border of 0, and should be continuous.
template <typename T>
unsigned int SelectWarpScanUnorderedTesting(const T* in_data, const int size, T* out_data, cudaStream_t stream) {
	dim3 threads( BLOCK_SIZE, 1);
	unsigned int numBlocks = size / threads.x + (size % threads.x > 0 ? 1 : 0);
	unsigned int minbx = (unsigned int) ceil(sqrt((double)numBlocks));
	unsigned int minby = numBlocks / minbx + (numBlocks % minbx > 0 ? 1 : 0);
	dim3 blocks( minbx, minby );
	// get data to GPU
	T *d_in_data;
	T *d_out_data;
	cudaMalloc(&d_in_data, sizeof(T) * size);
	cudaMalloc(&d_out_data, sizeof(T) * size);
	cudaMemcpy(d_in_data, in_data, sizeof(T) * size, cudaMemcpyHostToDevice);
	cudaMemset(d_out_data, 0, sizeof(T) * size);
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); }
	cudaThreadSynchronize();
	unsigned int *d_queue_size;
	cudaMalloc(&d_queue_size, sizeof(unsigned int));
	cudaMemset(d_queue_size, 0, sizeof(unsigned int));
	err = cudaGetLastError();
	if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); }
	cudaThreadSynchronize();
	printf("blocks: %d, %d, threads: %d\n", blocks.x, blocks.y, threads.x);
	// ::nscale::gpu::unordered_select<<<blocks, threads, 0, stream >>>(d_in_data, size, d_out_data, d_queue_size);
	unordered_select<<<blocks, threads >>>(d_in_data, size, d_out_data, d_queue_size);
	err = cudaGetLastError();
	if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); }
	cudaThreadSynchronize();
	// get data off gpu
	unsigned int queue_size = 0;
	cudaMemcpy((void*)&queue_size, (void*)d_queue_size, sizeof(unsigned int), cudaMemcpyDeviceToHost);
	cudaMemcpy(out_data, d_out_data, sizeof(T) * size, cudaMemcpyDeviceToHost);
	err = cudaGetLastError();
	if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); }
	cudaThreadSynchronize();
	cudaFree(d_in_data);
	cudaFree(d_out_data);
	cudaFree(d_queue_size);
	return queue_size;
}

// warp-scan, two-kernel ordered variant: gapped_select then compact.
// connectivity: need to have border of 0, and should be continuous.
// NOTE(review): compact() documents block_pos as already-scanned starting
// positions, but gapped_select writes raw per-block lengths and no scan runs
// in between; also d_block_pos holds blocks.x*blocks.y ints while the kernels
// index up to [blocks.x*blocks.y] — looks one element short.  Confirm both.
template <typename T>
unsigned int SelectWarpScanOrderedTesting(const T* in_data, const int size, T* out_data, cudaStream_t stream) {
	dim3 threads( BLOCK_SIZE, 1);
	unsigned int numBlocks = size / threads.x + (size % threads.x > 0 ? 1 : 0);
	unsigned int minbx = (unsigned int) ceil(sqrt((double)numBlocks));
	unsigned int minby = numBlocks / minbx + (numBlocks % minbx > 0 ? 1 : 0);
	dim3 blocks( minbx, minby );
	// get data to GPU
	T *d_in_data;
	T *d_out_data;
	T *d_out_data2;
	cudaMalloc((void **)&d_in_data, sizeof(T) * size);
	cudaMalloc((void **)&d_out_data, sizeof(T) * size);
	cudaMalloc((void **)&d_out_data2, sizeof(T) * size);
	cudaMemcpy(d_in_data, in_data, sizeof(T) * size, cudaMemcpyHostToDevice);
	cudaMemset(d_out_data, 0, sizeof(T) * size);
	cudaMemset(d_out_data2, 0, sizeof(T) * size);
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); }
	cudaThreadSynchronize();
	int *d_block_pos;
	cudaMalloc(&d_block_pos, sizeof(int) * blocks.x * blocks.y);
	cudaMemset(d_block_pos, 0, sizeof(int) * blocks.x * blocks.y);
	unsigned int *d_queue_size;
	cudaMalloc(&d_queue_size, sizeof(unsigned int));
	cudaMemset(d_queue_size, 0, sizeof(unsigned int));
	err = cudaGetLastError();
	if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); }
	cudaThreadSynchronize();
	printf("ordered blocks: %d, %d, threads: %d, size %d \n", blocks.x, blocks.y, threads.x, size);
	// ::nscale::gpu::gapped_select<<<blocks, threads, 0, stream >>>(d_in_data, size, d_out_data, d_block_pos);
	// ::nscale::gpu::compact <<<blocks, threads, 0, stream >>>(d_out_data, size, d_block_pos, d_out_data2);
	gapped_select<<<blocks, threads >>>(d_in_data, size, d_out_data, d_block_pos);
	compact <<<blocks, threads >>>(d_out_data, size, d_block_pos, d_out_data2, d_queue_size);
	err = cudaGetLastError();
	if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); }
	cudaThreadSynchronize();
	// get data off gpu
	unsigned int queue_size ;
	cudaMemcpy((void*)&queue_size, (void*)d_queue_size, sizeof(unsigned int), cudaMemcpyDeviceToHost);
	cudaMemcpy(out_data, d_out_data2, sizeof(T) * size, cudaMemcpyDeviceToHost);
	err = cudaGetLastError();
	if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); }
	cudaThreadSynchronize();
	cudaFree(d_in_data);
	cudaFree(d_out_data);
	cudaFree(d_out_data2);
	cudaFree(d_block_pos);
	cudaFree(d_queue_size);
	return queue_size;
}

// explicit instantiations for int payloads
template unsigned int SelectCPUTesting<int>(const int* in_data, const int size, int* out_data);
template unsigned int SelectThrustScanTesting<int>(const int* in_data, const int size, int* out_data, cudaStream_t stream);
template unsigned int SelectWarpScanUnorderedTesting<int>(const int* in_data, const int size, int* out_data, cudaStream_t stream);
template unsigned int SelectWarpScanOrderedTesting<int>(const int* in_data, const int size, int* out_data, cudaStream_t stream);

}}
21,958
#include "includes.h"
/**
 * Program that mimics the behavior of gpdt for evaluating a polynomial
 * kernel over a series of variable-size sparse vectors using CUDA.
 * compile with:
 *   nvcc -o simil_gpdt_si_cuda simil_gpdt_si_cuda.cu
 * run with:
 *   ./simil_gpdt_si_cuda [num vectors] [num components] [num rows to compute] [kernel type] [degree(int)/sigma(float)]
 **/
using namespace std;

/**
 * Polynomial-kernel rows: for each requested row x and each requested index
 * ind[y], computes (dot(Vd[x], Vd[ind[y]]) + 1)^s over the sparse support of
 * row x and stores it at Ris[y * N + x].
 *
 * Vd        : dense data matrix, N rows x C columns (row-major)
 * Ris       : output, dim_indici rows x N columns
 * ind       : the dim_indici column indices (rows of Vd) to pair with
 * Vp / Vnp  : per-row sparse support: Vp[x*nr_max_val + j] lists the Nr_val =
 *             Vnp[x] nonzero component positions of row x
 * s         : polynomial degree (integer power)
 */
__global__ void Kernel_polimoniale(float *Vd, float *Ris, int N, int C, int dim_indici, int *ind, int *Vp, int *Vnp, int nr_max_val, int s)
{
	// Grid-stride starting points for this thread in both dimensions.
	const int x0 = blockIdx.x * blockDim.x + threadIdx.x;
	const int y0 = blockIdx.y * blockDim.y + threadIdx.y;

	for (int x = x0; x < N; x += blockDim.x * gridDim.x) {
		// BUG FIX: restart y from y0 on every x iteration.  Previously the
		// exhausted y from the first pass carried over, so every row after
		// the first grid-x sweep computed nothing.
		for (int y = y0; y < dim_indici; y += blockDim.y * gridDim.y) {
			const int tmp_ind = ind[y];
			const int Nr_val = Vnp[x];   // nonzero count of row x

			// sparse dot product over row x's support
			float pol = 0.0f;
			for (int j = 0; j < Nr_val; j++) {
				const int pos = Vp[x * nr_max_val + j];
				pol = pol + (Vd[x * C + pos] * Vd[tmp_ind * C + pos]);
			}
			pol = pol + 1;

			// integer power: (dot + 1)^s
			float tmp = 1.0f;
			for (int j = 0; j < s; j++) {
				tmp = tmp * pol;
			}
			// column-major-ish layout: row y of the result, element x
			Ris[y * N + x] = tmp;
		}
	}
}
21,959
// http://cuda-programming.blogspot.com/2013/01/what-is-constant-memory-in-cuda.html
//STL
#include <iostream>
#include <cstdio>   // printf
#include <cmath>    // acos

// 360-entry angle lookup table in constant memory (read-only on device,
// broadcast-efficient when all lanes read the same entry).
__constant__ float d_angle[ 360 ];

__global__ void test_kernel( float* d_array );

/**
 * Demo: fill a constant-memory LUT from the host, sum it on the device,
 * and read the symbol back for verification.
 */
int main( int argc, char** argv )
{
	unsigned size = 3200;
	float* d_array;
	float h_angle[ 360 ];

	//allocate device memory
	cudaMalloc( ( void** ) &d_array, size * sizeof( float ) );
	//initialize allocated memory
	cudaMemset( d_array, 0, sizeof( float ) * size );

	//initialize angle array on host: degrees -> radians (acos(-1) == pi)
	for( unsigned loop=0; loop < 360; loop++ )
		h_angle[ loop ] = acos( -1.0f ) * loop / 180.0f;

	//copy host angle data to constant memory
	cudaMemcpyToSymbol( d_angle, h_angle, 360 * sizeof( float ) );

	test_kernel<<< size / 64, 64 >>>( d_array );

	//constant variable view: read the symbol back and compare with the host copy
	float DtH_angle[ 360 ];
	cudaMemcpyFromSymbol( DtH_angle, d_angle, 360 * sizeof( float ) );
	for ( unsigned i = 0; i < 360; i++ )
	{
		printf( "[ind=%02i]: h_angle: [ %.2f ]; DtH_angle: [ %.2f ] \n", i, h_angle[ i ], DtH_angle[ i ] );
	}

	//free device memory
	cudaFree( d_array );
	// BUG FIX: removed cudaFree( d_angle ).  __constant__ symbols are not
	// cudaMalloc'd allocations; passing one to cudaFree is an invalid-pointer
	// error, not a free.
	return 0;
}

// Each thread accumulates the sum of the whole LUT into its own output slot.
__global__ void test_kernel(float* d_array)
{
	//calculate each thread global index
	unsigned index = blockIdx.x * blockDim.x + threadIdx.x;

	#pragma unroll 10
	for( unsigned loop=0; loop < 360; loop++ )
		d_array[ index ] = d_array[ index ] + d_angle[ loop ];
	return;
}
21,960
#include <stdio.h>
#include <math.h>

#define N 8
#define THREAD_PER_BLOCK 2

/**
 * Naive square matrix multiply, out = in1 * in2 (row-major, size x size),
 * one thread per output element.
 */
__global__ void multiply(int * in1, int * in2, int * out, int size)
{
	int index = threadIdx.x + blockIdx.x * blockDim.x;
	// Guard the grid tail (previously missing): the launch may spawn more
	// threads than there are output elements.
	if (index >= size * size) return;
	int startrow = (index / size) * size;  // offset of this element's row
	int startcol = index % size;           // this element's column
	int sum = 0;
	for (int i = 0; i < size; ++i) {
		// row element i of in1 times column element i of in2
		sum += in1[startrow + i] * in2[startcol + i * size];
	}
	out[index] = sum;
}

int main()
{
	int * in1, * in2, * out;
	int * d_in1, * d_in2, * d_out;
	int size = N * N * sizeof(int);
	int i;

	// device buffers
	cudaMalloc((void**)&d_in1, size);
	cudaMalloc((void**)&d_in2, size);
	cudaMalloc((void**)&d_out, size);

	// host buffers, filled with small deterministic patterns
	in1 = (int *)malloc(size);
	in2 = (int *)malloc(size);
	out = (int *)malloc(size);
	for(i = 0; i < N*N; ++i)
	{
		in1[i] = i % N;
		in2[i] = i % N - 1;
	}

	cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice);

	multiply<<< N*N/THREAD_PER_BLOCK, THREAD_PER_BLOCK >>>(d_in1, d_in2, d_out, N);

	// blocking copy also synchronizes with the kernel
	cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);

	// print both operands and the result as N x N grids
	for(i = 0; i < N*N; ++i)
	{
		printf("%2d ", in1[i]);
		if((i+1) % N == 0) { printf("\n"); }
	}
	printf("--------\n");
	for(i = 0; i < N*N; ++i)
	{
		printf("%2d ", in2[i]);
		if((i+1) % N == 0) { printf("\n"); }
	}
	printf("--------\n");
	for(i = 0; i < N*N; ++i)
	{
		printf("%2d ", out[i]);
		if((i+1) % N == 0) { printf("\n"); }
	}

	free(in1);
	free(in2);
	free(out);
	cudaFree(d_in1);
	cudaFree(d_in2);
	cudaFree(d_out);
	return 0;
}
21,961
#include <stdlib.h>
#include <stdio.h>

#define NUM_BLOCKS 20

__device__ int* dataptr[NUM_BLOCKS]; // Per-block pointer

/**
 * Allocate one device-heap buffer per block (thread 0 only), then zero it
 * with all threads of the block in parallel.
 */
__global__ void allocmem()
{
	// Only the first thread in the block does the allocation
	// since we want only one allocation per block.
	// BUG FIX: was "blockDim.x * 4" — hard-coded the size of int.
	if (threadIdx.x == 0)
		dataptr[blockIdx.x] = (int*)malloc(blockDim.x * sizeof(int));
	__syncthreads();

	// Check for failure (device malloc returns NULL when the heap is exhausted)
	if (dataptr[blockIdx.x] == NULL)
		return;

	// Zero the data with all threads in parallel
	dataptr[blockIdx.x][threadIdx.x] = 0;
}

// Simple example: accumulate each thread's ID into its element.
__global__ void usemem()
{
	int* ptr = dataptr[blockIdx.x];
	if (ptr != NULL)
		ptr[threadIdx.x] += threadIdx.x;
}

// Print the content of the buffer before freeing it.
__global__ void freemem()
{
	int* ptr = dataptr[blockIdx.x];
	if (ptr != NULL)
		printf("Block %d, Thread %d: final value = %d\n",
		       blockIdx.x, threadIdx.x, ptr[threadIdx.x]);

	// Only free from one thread! (one allocation per block)
	if (threadIdx.x == 0)
		free(ptr);
}

int main()
{
	// enlarge the device heap before any kernel uses malloc
	cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128*1024*1024);

	// Allocate memory
	allocmem<<< NUM_BLOCKS, 10 >>>();

	// Use memory (same stream, so kernels run in order)
	usemem<<< NUM_BLOCKS, 10 >>>();
	usemem<<< NUM_BLOCKS, 10 >>>();
	usemem<<< NUM_BLOCKS, 10 >>>();

	// Free memory
	freemem<<< NUM_BLOCKS, 10 >>>();

	cudaDeviceSynchronize();
	return 0;
}
21,962
#include "includes.h"

// Inverse-permutation builder: for every i in [0, total),
// writes output[inds[i]] = i.  One thread per input index.
__global__ void createLookupKernel(const int* inds, int total, int* output)
{
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= total)
		return;
	output[inds[tid]] = tid;
}
21,963
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

/* example usage:
   in advection in combination with autoTester:

   rm rooflineResults.m rooflineScript.m
   ../../utilities/autoTester/autoTester "../../utilities/roofline/roofline ./advectionMain"
*/

#include <unistd.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "cuda.h"

#define mymax(a,b) ((a>b)?(a):(b))

// streaming device-to-device copy used to measure empirical bandwidth
__global__ void copyKernel(const int N, const float * __restrict__ a, float * __restrict__ b){
  int n = threadIdx.x + blockIdx.x*blockDim.x;
  if(n<N) b[n] = a[n];
}

/**
 * Roofline harness: runs the given executable twice under nvprof (metrics
 * pass + timing pass), parses per-kernel time/flops/DRAM traffic, measures
 * an empirical copy bandwidth, and appends one roofline data row per kernel
 * to <kernelName>.dat.
 */
int main(int argc, char **argv){

  if(argc!=2){
    printf("usage: ./roofline executable \n");
    exit(-1);
  }

  char *executable = strdup(argv[1]);

  char cmd1[BUFSIZ], cmd2[BUFSIZ];

  // pass 1: metrics (flops + DRAM throughput/transactions)
  sprintf(cmd1, "nvprof -u ms --csv --metrics dram_read_throughput,dram_write_throughput,dram_write_transactions,dram_read_transactions,flop_dp_efficiency,flop_count_dp %s 2> out1", executable);
  printf("cmd1 = `%s`\n", cmd1);
  system(cmd1);

  // pass 2: plain timing summary
  sprintf(cmd2, "nvprof -u ms --csv %s 2> out2 ", executable);
  printf("cmd2 = `%s`\n", cmd2);
  system(cmd2);

  char buf[BUFSIZ];
  char *token;
  char *rest = buf;

  FILE *fp2 = fopen("out2", "r");

  int Nkernels = 0;
  int maxNkernels = 1000;
  char   **kernelNames = (char**) calloc(maxNkernels, sizeof(char*));
  double  *kernelTime  = (double*) calloc(maxNkernels, sizeof(double));
  long long int *kernelFlopCount = (long long int*) calloc(maxNkernels, sizeof(long long int));
  double  *kernelFlopEfficiency  = (double*) calloc(maxNkernels, sizeof(double));
  double  *kernelReadThroughput  = (double*) calloc(maxNkernels, sizeof(double));
  double  *kernelWriteThroughput = (double*) calloc(maxNkernels, sizeof(double));
  long long int *kernelBytesRead    = (long long int*) calloc(maxNkernels, sizeof(long long int));
  long long int *kernelBytesWritten = (long long int*) calloc(maxNkernels, sizeof(long long int));
  double *kernelMaxEmpiricalBandwidth = (double*) calloc(maxNkernels, sizeof(double));

  // parse the timing pass: one "GPU activities" row per kernel of interest
  char *line;
  do{
    line = fgets(buf, BUFSIZ, fp2);
    rest = buf;
    if(line && strstr(rest, "GPU activities") && strstr(rest, "advection") ){ // AHEM
      strtok(rest, ",");
      for(int n=0;n<6;++n){
	token = strtok(NULL, ",");
      }
      // BUG FIX (comment): nvprof was invoked with -u ms, so this converts
      // from ms to s (the old comment said ns).
      kernelTime[Nkernels] = atof(token)/1.e3;

      token = strtok(NULL, "\"");
      kernelNames[Nkernels] = strdup(strtok(token, "("));
      printf("kernel Name = %s, took %lg seconds\n", kernelNames[Nkernels], kernelTime[Nkernels]);
      ++Nkernels;
    }
  }while(line!=NULL);

  typedef struct{
    double dram_read_throughput;
    double dram_write_throughput;
    double flop_dp_efficiency;
  } performance;

  FILE *fp1 = fopen("out1", "r");

  // skip the CSV preamble up to the header row (slightly dangerous: loops
  // forever if "Invocations" never appears)
  do{
    fgets(buf, BUFSIZ, fp1);
  }while(!strstr(buf, "Invocations"));

  do{
    // assume "Device","Kernel","Invocations","Metric Name","Metric Description","Min","Max","Avg"
    line = fgets(buf, BUFSIZ, fp1);

    if(line && strstr(buf, "advection")){ // AHEM
      int knl;
      for(knl = 0;knl<Nkernels;++knl){
	if(strstr(buf, kernelNames[knl])){
	  rest = strdup(buf);
	  token = strtok(rest, "\"");
	  for(int n=0;n<6;++n){
	    token = strtok(NULL, "\"");
	  }
	  token = strtok(NULL, ",");
	  token = strtok(NULL, ",");
	  token = strtok(NULL, ","); // the "Avg" column

	  double val;
	  long long int cnt;
	  if(strstr(buf, "flop_dp_efficiency")){
	    sscanf(token, "%lf", &val);
	    kernelFlopEfficiency[knl] = val;
	  }
	  if(strstr(buf, "dram_read_throughput")){
	    sscanf(token, "%lf", &val);
	    kernelReadThroughput[knl] = val;
	  }
	  if(strstr(buf, "dram_write_throughput")){
	    sscanf(token, "%lf", &val);
	    kernelWriteThroughput[knl] = val;
	  }
	  if(strstr(buf, "flop_count_dp")){
	    sscanf(token, "%lld", &cnt);
	    kernelFlopCount[knl] = cnt;
	  }
	  if(strstr(buf, "dram_read_transactions")){
	    sscanf(token, "%lld", &cnt);
	    kernelBytesRead[knl] = cnt;
	  }
	  if(strstr(buf, "dram_write_transactions")){
	    sscanf(token, "%lld", &cnt);
	    kernelBytesWritten[knl] = cnt;
	  }
	  break;
	}
      }
    }
  }while(line);

  fclose(fp1);
  fclose(fp2);

  // now benchmark memory on device: profile a big (2 GB round-trip) copy
  double maxBWest = 0;
  {
    long long int bytes = 2*1024*1024*(long long int)1024;
    void *o_a, *o_b;
    cudaMalloc(&o_a, bytes/2);
    cudaMalloc(&o_b, bytes/2);

    cudaDeviceSynchronize();

    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start);

    dim3 B(256,1,1);
    dim3 G( ((bytes/8) + 255)/256, 1, 1);

    int Ntests = 1;
    for(int n=0;n<Ntests;++n){
      copyKernel <<< G, B >>> (bytes/8, (float*) o_a, (float*) o_b);
      copyKernel <<< G, B >>> (bytes/8, (float*) o_b, (float*) o_a);
    }

    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaDeviceSynchronize();

    float elapsed;
    cudaEventElapsedTime(&elapsed, start, end);
    elapsed /= (Ntests*1000.); // ms -> s per test

    maxBWest = 2*bytes/(elapsed*1.e9);

    // BUG FIX: cudaFree takes the device pointer itself; the old code passed
    // &o_a / &o_b (address of the host pointer), which freed nothing and
    // leaked 2 GB of device memory.
    cudaFree(o_a);
    cudaFree(o_b);
  }

  int knl;
  for(knl = 0;knl<Nkernels;++knl){

    char resultsName[BUFSIZ];
    sprintf(resultsName, "%s.dat", kernelNames[knl]);
    FILE *fpResults = fopen(resultsName, "a");

    fprintf(fpResults, "%%%% arithmeticIntensity, perf, kernelMaxEmpiricalBandwidth, maxEstimateGflops, maxEstimatedBandwidth, bytes\n");

    // DRAM transactions are 32 bytes each
    long long int bytes = (kernelBytesRead[knl]+kernelBytesWritten[knl])*32;
    long long int flops = kernelFlopCount[knl];
    double arithmeticIntensity = (double)flops/bytes;
    double perf = (flops/kernelTime[knl])/1.e9; // convert to GFLOPS/s
    printf("perf = %lf, eff = %lf\n",perf, kernelFlopEfficiency[knl]);
    double maxGFLOPSest = 100*perf/kernelFlopEfficiency[knl]; // since efficiency is given in percent

    // re-run the copy benchmark at this kernel's working-set size
    void *o_a, *o_b;
    cudaMalloc(&o_a, bytes/2);
    cudaMalloc(&o_b, bytes/2);

    cudaDeviceSynchronize();

    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start);

    dim3 B(256,1,1);
    dim3 G( ((bytes/8) + 255)/256, 1, 1);

    int Ntests = 1;
    for(int n=0;n<Ntests;++n){
      copyKernel <<< G, B >>> (bytes/8, (float*) o_a, (float*) o_b);
      copyKernel <<< G, B >>> (bytes/8, (float*) o_b, (float*) o_a);
    }

    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaDeviceSynchronize();

    float elapsed;
    cudaEventElapsedTime(&elapsed, start, end);
    elapsed /= (Ntests*1000.);

    // BUG FIX: see above — was cudaFree(&o_a) / cudaFree(&o_b).
    cudaFree(o_a);
    cudaFree(o_b);

    kernelMaxEmpiricalBandwidth[knl] = (2.*bytes/elapsed)/1.e9; // convert max empirical bw for this vector size to GB/s

    // BUG FIX: bytes is a long long; passing it to %lg was undefined
    // behavior — cast to double for the %lg conversion.
    fprintf(fpResults, "%lg, %lg, %lg, %lg, %lg, %lg\n",
	    arithmeticIntensity, perf, kernelMaxEmpiricalBandwidth[knl], maxGFLOPSest, maxBWest, (double)bytes);

    fflush(fpResults);
    fclose(fpResults);
  }

  return 0;
}
21,964
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// NOTE(review): defining __CUDACC_RTC__/__CUDACC__ by hand before including
// device_functions.h looks like an IDE/IntelliSense workaround, not something
// nvcc needs — confirm before removing.
#define __CUDACC_RTC__
#define __CUDACC__
#include <device_functions.h>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>

// Row-major matrix; `step` is the leading dimension (row stride in elements),
// which lets sub-matrix views share the parent's storage.
typedef struct {
	int width;
	int height;
	float* elements;
	int step;
} Matrix;

using namespace std;

#define BLOCK_SIZE 4

__global__ void MatrixMulKernel(const Matrix, const Matrix, Matrix);

// Host wrapper: copies A and B to the device, launches the tiled multiply
// C = A * B, and copies C back.  Assumes A.height, A.width and B.width are
// multiples of BLOCK_SIZE (the grid division truncates otherwise).
void MatrixMul(const Matrix A, const Matrix B, Matrix C)
{
	Matrix d_A;
	d_A.width = d_A.step = A.width;
	d_A.height = A.height;
	size_t size = A.width * A.height * sizeof(float);
	cudaError_t err = cudaMalloc(&d_A.elements, size);
	cout << "CUDA malloc A: " << cudaGetErrorString(err) << endl;
	cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);

	Matrix d_B;
	d_B.width = d_B.step = B.width;
	d_B.height = B.height;
	size = B.width * B.height * sizeof(float);
	err = cudaMalloc(&d_B.elements, size);
	cout << "CUDA malloc B: " << cudaGetErrorString(err) << endl;
	cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

	Matrix d_C;
	d_C.width = d_C.step = C.width;
	d_C.height = C.height;
	size = C.width * C.height * sizeof(float);
	err = cudaMalloc(&d_C.elements, size);
	cout << "CUDA malloc C: " << cudaGetErrorString(err) << endl;

	// one BLOCK_SIZE x BLOCK_SIZE thread block per output tile
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
	MatrixMulKernel <<<dimGrid, dimBlock>>> (d_A, d_B, d_C);
	// NOTE(review): cudaThreadSynchronize is deprecated; cudaDeviceSynchronize
	// is the modern equivalent.
	err = cudaThreadSynchronize();
	cout << "Run kernel: " << cudaGetErrorString(err) << endl;

	err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
	cout << "Copy C off of device: " << cudaGetErrorString(err) << endl;

	cudaFree(d_A.elements);
	cudaFree(d_B.elements);
	cudaFree(d_C.elements);
}

// Read A(row, col) honoring the leading dimension.
__device__ float GetElement(const Matrix A, int row, int col)
{
	return A.elements[row * A.step + col];
}

// Write A(row, col) honoring the leading dimension.
__device__ void SetElement(Matrix A, int row, int col, float value)
{
	A.elements[row * A.step + col] = value;
}

// View of the BLOCK_SIZE x BLOCK_SIZE tile at block coordinates (row, col);
// shares storage with A (no copy).
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
	Matrix Asub;
	Asub.width = BLOCK_SIZE;
	Asub.height = BLOCK_SIZE;
	Asub.step = A.step;
	Asub.elements = &A.elements[A.step * BLOCK_SIZE * row + BLOCK_SIZE * col];
	return Asub;
}

// Classic shared-memory tiled multiply: each block computes one C tile by
// marching matching tiles of A and B through shared memory.
__global__ void MatrixMulKernel(Matrix A, Matrix B, Matrix C)
{
	int blockRow = blockIdx.y;
	int blockCol = blockIdx.x;
	Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
	float Cvalue = 0.0;
	int row = threadIdx.y;
	int col = threadIdx.x;
	for (int m = 0; m < (A.width / BLOCK_SIZE); ++m)
	{
		Matrix Asub = GetSubMatrix(A, blockRow, m);
		Matrix Bsub = GetSubMatrix(B, m, blockCol);
		__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
		__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
		// stage the tiles, one element per thread
		As[row][col] = GetElement(Asub, row, col);
		Bs[row][col] = GetElement(Bsub, row, col);
		__syncthreads();  // tiles fully loaded before use
		for (int e = 0; e < BLOCK_SIZE; ++e)
			Cvalue += As[row][e] * Bs[e][col];
		__syncthreads();  // done reading before next iteration overwrites
	}
	SetElement(Csub, row, col, Cvalue);
}

// Driver: random BLOCK_SIZE x BLOCK_SIZE matrices, multiply, print all three.
int main()
{
	Matrix A, B, C;
	int a1, a2, b1, b2;
	a1 = BLOCK_SIZE;
	a2 = BLOCK_SIZE;
	b1 = a2;   // inner dimensions must agree
	b2 = BLOCK_SIZE;

	A.height = a1;
	A.width = a2;
	A.elements = (float*)malloc(A.width * A.height * sizeof(float));
	B.height = b1;
	B.width = b2;
	B.elements = (float*)malloc(B.width * B.height * sizeof(float));
	C.height = A.height;
	C.width = B.width;
	C.elements = (float*)malloc(C.width * C.height * sizeof(float));

	for (int i = 0; i < A.height; i++)
		for (int j = 0; j < A.width; j++)
			A.elements[i * A.width + j] = rand() % 10;
	for (int i = 0; i < B.height; i++)
		for (int j = 0; j < B.width; j++)
			B.elements[i * B.width + j] = rand() % 10;

	MatrixMul(A, B, C);

	cout << "---Result of calculating:" << endl;
	cout << "Martix A: " << endl;
	for (int i = 0; i < A.height; i++)
	{
		for (int j = 0; j < A.width; j++)
			cout << A.elements[i * A.width + j] << " ";
		cout << endl;
	}
	cout << endl;
	cout << "Martix B: " << endl;
	for (int i = 0; i < B.height; i++)
	{
		for (int j = 0; j < B.width; j++)
			cout << B.elements[i * B.width + j] << " ";
		cout << endl;
	}
	cout << endl;
	cout << "Martix C: " << endl;
	for (int i = 0; i < C.height; i++)
	{
		for (int j = 0; j < C.width; j++)
			cout << C.elements[i * C.width + j] << " ";
		cout << endl;
	}
	cout << endl;
	return 0;
}
21,965
// Decodes a shift-by-one (Caesar) encoded message read from a file,
// one character per CUDA thread.
//
// Usage: ./DecodeEmail2FULP <filename>
// The input file is expected to be a single line without newline characters.
#include <stdio.h>
#include <stdlib.h>

// Kernel: each thread decodes one character; `n` bounds the grid so
// threads past the end of the message do nothing.
__global__ void decode (char *originalMessage, char *decodedMessage, int n);

int main (int argc, char *argv[])
{
    //-------- Testing parameters --------//
    if (argc != 2){
        printf("Incorrect number of parameters :(\n");
        printf("Try: \"./DecodeEmail2FULP <filename>\"\n");
        exit(EXIT_FAILURE);   // error path: signal failure to the shell
    }

    //-------- Reading file --------//
    FILE *inputFile = fopen(argv[1], "r");
    if (inputFile == NULL) {
        fprintf(stderr, "File could not be opened :P\n");
        printf("Make sure you spelled the name of your file correctly.\n");
        exit(EXIT_FAILURE);
    }

    //-------- Calculating size of file and buffers --------//
    fseek(inputFile, 0, SEEK_END);
    int messageSize = ftell(inputFile);
    fseek(inputFile, 0, SEEK_SET);
    messageSize++;                          // +1 for the terminating '\0'

    int memorySize = messageSize * sizeof(char);
    // Heap buffers instead of variable-length arrays (VLAs are a
    // non-standard extension in C++).
    char *message        = (char *)malloc(memorySize);
    char *decodedMessage = (char *)malloc(memorySize);
    char *dev_message, *dev_decodedMsg;
    if (message == NULL || decodedMessage == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        exit(EXIT_FAILURE);
    }

    //-------- Reading file into buffer --------//
    // File is expected to be a single line without change line characters.
    while(fgets(message, messageSize, inputFile)) {
        printf("%s\n", message);
    }
    message[messageSize-1] = '\0';
    fclose(inputFile);

    printf("Decoding original message:\n %s\n", message);

    //-------- Executing CUDA code --------//
    cudaMalloc((void**)&dev_message, memorySize);
    cudaMalloc((void**)&dev_decodedMsg, memorySize);
    cudaMemcpy(dev_message, message, memorySize, cudaMemcpyHostToDevice);

    // Multi-block launch: the original <<<1, messageSize>>> silently
    // failed for messages longer than the 1024-threads-per-block limit.
    int threads = 256;
    int blocks  = (messageSize + threads - 1) / threads;
    decode<<<blocks, threads>>>(dev_message, dev_decodedMsg, messageSize);
    cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated

    cudaMemcpy(decodedMessage, dev_decodedMsg, memorySize, cudaMemcpyDeviceToHost);
    decodedMessage[messageSize-1] = '\0';
    printf("Decoded message is: \n%s\n", decodedMessage);

    cudaFree(dev_message);
    cudaFree(dev_decodedMsg);
    free(message);
    free(decodedMessage);
    exit(0);
}

__global__ void decode (char *originalMessage, char *decodedMessage, int n)
{
    // Super secret and complicated decryption algorithm.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        decodedMessage[i] = originalMessage[i] - 1;
    }
}
21,966
// Min/max reduction with locations: each thread loads one element of src
// (falling back to src[0] when tid >= n so extra threads are neutral), the
// block tree-reduces value+index pairs in the *Cache buffers, and thread 0
// writes the block's min/max value and index to
// minValue/maxValue/minLoc/maxLoc[blockIdx.x] (for a host- or second-pass
// final reduction).
// NOTE(review): the caches are indexed by threadIdx.x only. If they are
// device-global buffers shared by all blocks (as the pointer parameters
// suggest), concurrently resident blocks overwrite each other's slots —
// confirm the launch uses a single block, per-block cache regions, or
// migrate the caches to __shared__ memory.
// NOTE(review): src[0] is read unconditionally, so n == 0 / empty src is an
// out-of-bounds read — verify callers guarantee n >= 1.
// The tree reduction assumes blockDim.x is a power of two — TODO confirm.
#include "includes.h" __global__ void cu_minMaxLoc(const float* src, float* minValue, float* maxValue, int* minLoc, int* maxLoc, float* minValCache, float* maxValCache, int* minLocCache, int* maxLocCache, const int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; //int stride = blockDim.x * gridDim.x; float val = src[0]; int loc = 0; if(tid < n){ val = src[tid]; loc = tid; } maxValCache[threadIdx.x] = val; minValCache[threadIdx.x] = val; maxLocCache[threadIdx.x] = loc; minLocCache[threadIdx.x] = loc; __syncthreads(); // contiguous range pattern for(int offset = blockDim.x / 2; offset > 0; offset >>= 1){ if(threadIdx.x < offset){ // add a partial sum upstream to our own if(maxValCache[threadIdx.x] >= maxValCache[threadIdx.x + offset]){ ; }else{ maxValCache[threadIdx.x] = maxValCache[threadIdx.x + offset]; maxLocCache[threadIdx.x] = maxLocCache[threadIdx.x + offset]; } if(minValCache[threadIdx.x] <= minValCache[threadIdx.x + offset]){ ; }else{ minValCache[threadIdx.x] = minValCache[threadIdx.x + offset]; minLocCache[threadIdx.x] = minLocCache[threadIdx.x + offset]; } } // wait until all threads in the block have // updated their partial sums __syncthreads(); } // thread 0 writes the final result if(threadIdx.x == 0){ minValue[blockIdx.x] = minValCache[0]; maxValue[blockIdx.x] = maxValCache[0]; minLoc[blockIdx.x] = minLocCache[0]; maxLoc[blockIdx.x] = maxLocCache[0]; } }
21,967
// NOTE(review): auto-generated floating-point stress test — the kernel
// evaluates a fixed expression tree over its arguments and prints the final
// value of `comp`; main expects exactly 24 numeric argv values and performs
// no argc/NULL checks by design. Left byte-identical per the
// "Do not modify" header.
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) { if (comp <= (var_4 - var_5 + (-1.4965E-35f + +1.1260E34f))) { if (comp >= +1.7005E34f + var_6 - var_7 - +1.7973E-36f - floorf(-1.0959E34f + +1.7362E-43f)) { for (int i=0; i < var_1; ++i) { float tmp_1 = (var_8 * var_9 - +1.7597E36f + +1.4203E-37f); float tmp_2 = -1.5612E34f; comp = tmp_2 * tmp_1 - ldexpf(-1.0539E34f - var_10, 2); for (int i=0; i < var_2; ++i) { float tmp_3 = (var_11 - expf(+1.4455E34f * (+1.9387E8f + +1.6840E36f))); float tmp_4 = +1.7951E-11f; comp += tmp_4 * tmp_3 / -1.0912E-37f + -1.5211E36f; comp += (-1.5596E35f - (+1.1210E-37f * var_12 - (var_13 * var_14))); } if (comp < (var_15 + var_16 / (var_17 * (+1.5478E-16f + logf((-0.0f * var_18 * +1.8467E-44f / (var_19 * var_20))))))) { comp += +1.9018E-44f / var_21; } for (int i=0; i < var_3; ++i) { comp = powf(+1.4150E-43f, -1.4939E-41f / ldexpf((var_22 * +1.1751E-6f * (-1.3135E-43f + var_23 + +1.1912E-35f)), 2)); } } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); 
float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24); cudaDeviceSynchronize(); return 0; }
21,968
// Dot product of two random float vectors: GPU (per-block shared-memory
// reduction, host sums the block partials) vs. a sequential CPU reference,
// with event timing and a relative-error check.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define MINREAL -1024.0
#define MAXREAL 1024.0
#define FAST_RED
#define ACCURACY 0.0001
#define NUM_OF_GPU_THREADS 256

// Aborts with a message if the most recent CUDA call/launch failed.
void checkCUDAError(const char *msg) {
	cudaError_t err = cudaGetLastError();
	if (cudaSuccess != err) {
		fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
}

// Fills vec with N uniform random values in [MINREAL, MAXREAL].
void vecFillRand(int N, float *vec) {
	for (int i = 0; i < N; i++)
		vec[i] = (rand() / (float)RAND_MAX)*(MAXREAL - MINREAL) + MINREAL;
}

// Sequential reference dot product.
float seq_dotProduct(float *a, float *b, int n) {
	float dp = 0;
	for (int i = 0; i < n; i++) {
		dp += a[i] * b[i];
	}
	return dp;
}

// Kernel: each block reduces its slice of a[i]*b[i] into c[blockIdx.x].
// FAST_RED uses a shared-memory tree reduction and assumes blockDim.x is a
// power of two (NUM_OF_GPU_THREADS = 256 satisfies this).
__global__ void dotProduct(float *a, float *b, float *c, int n) {
	__shared__ float temp[NUM_OF_GPU_THREADS];
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	// out-of-range threads contribute the additive identity
	temp[threadIdx.x] = (idx < n) ? a[idx] * b[idx] : 0.0f;
	__syncthreads();
#ifdef FAST_RED
	for (int i = blockDim.x >> 1; i > 0; i >>= 1) {
		if (threadIdx.x < i)
			temp[threadIdx.x] += temp[threadIdx.x + i];
		__syncthreads();
	}
	if (threadIdx.x == 0)
		c[blockIdx.x] = temp[0];
#else
	// Fallback: thread 0 serially accumulates the block's products.
	if (threadIdx.x == 0) {
		c[blockIdx.x] = 0.0f;
		int j = 0;
		for (int i = blockIdx.x*blockDim.x; ((i < ((blockIdx.x + 1)*blockDim.x)) && (i < n)); i++) {
			float t = temp[j++];
			c[blockIdx.x] = c[blockIdx.x] + t;
		}
	}
#endif
}

int main(int argc, char* argv[]) {
	int i, n, ARRAY_BYTES;
	float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
	float sum;
	float seq_sum;
	clock_t t;
	srand(time(NULL));

	// problem size from argv or stdin
	if (argc == 2) {
		n = atoi(argv[1]);
	} else {
		printf("N? ");
		fflush(stdout);
		scanf("%d", &n);
	}
	int BLOCKS_PER_GRID = (unsigned int)ceil(n / (float)NUM_OF_GPU_THREADS);
	printf("bpg = %d\n", BLOCKS_PER_GRID);

	// arrays on host
	ARRAY_BYTES = n * sizeof(float);
	h_A = (float *)malloc(ARRAY_BYTES);
	h_B = (float *)malloc(ARRAY_BYTES);
	h_C = (float *)malloc(BLOCKS_PER_GRID * sizeof(float));
	printf("\ncreating A and B...\n\n");
	vecFillRand(n, h_A);
	vecFillRand(n, h_B);
	vecFillRand(BLOCKS_PER_GRID, h_C);  // contents are overwritten by the kernel

	// arrays on device
	cudaMalloc((void**)&d_A, ARRAY_BYTES);
	cudaMalloc((void**)&d_B, ARRAY_BYTES);
	cudaMalloc((void**)&d_C, BLOCKS_PER_GRID * sizeof(float));

	// transfer the arrays to the GPU
	cudaMemcpy(d_A, h_A, ARRAY_BYTES, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, ARRAY_BYTES, cudaMemcpyHostToDevice);
	cudaMemcpy(d_C, h_C, BLOCKS_PER_GRID * sizeof(float), cudaMemcpyHostToDevice);

	// TIME START: events for timing kernel execution
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);  // 0 is the default stream id

	// launch the kernel
	dim3 block(NUM_OF_GPU_THREADS);  // 256, 1, 1
	dim3 grid(BLOCKS_PER_GRID);
	printf("computing dotProduct... \n");
	dotProduct <<<grid, block >>>(d_A, d_B, d_C, n);

	// block until the device has completed, then check for launch errors
	cudaDeviceSynchronize();
	checkCUDAError("kernel invocation");

	// TIME END
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	float elapsed_kernel = 0.f;
	cudaEventElapsedTime(&elapsed_kernel, start, stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);

	// print kernel time (ms -> s)
	printf("CUDA TIME: %f \n\n", elapsed_kernel / 1000);

	// copy back the per-block partial sums
	cudaMemcpy(h_C, d_C, BLOCKS_PER_GRID * sizeof(float), cudaMemcpyDeviceToHost);
	checkCUDAError("memcpy");

	// final reduction on the host
	sum = 0;
	for (i = 0; i < BLOCKS_PER_GRID; i++)
		sum += h_C[i];

	// launch sequential reference
	t = clock();
	printf("computing seq_dotProduct... \n");
	seq_sum = seq_dotProduct(h_A, h_B, n);
	t = clock() - t;
	printf("SEQ TIME: %f \n\n", ((float)t) / CLOCKS_PER_SEC);

	// relative-error check; fabsf avoids picking up the integer abs() overload
	float value = fabsf((sum - seq_sum) / sum);
	if (value > ACCURACY) {
		printf("Test FAILED: err: %f cpu: %f gpu: %f \n", value, seq_sum, sum);
	}
	else {
		printf("Test PASSED \n");
	}

	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
	free(h_A);
	free(h_B);
	free(h_C);
	return 0;
}
21,969
#include "includes.h"

// Extracts the third channel of the interleaved input z (three floats per
// pixel), scales it by -lambda, and stores it in the single-channel output
// g. One thread per (x, y) position of an nx-by-ny image; threads outside
// the image bounds do nothing.
__global__ void solution_inter(float *z, float *g, float lambda, int nx, int ny)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= nx || row >= ny)
        return;
    const int pixel = row * nx + col;
    g[pixel] = -z[3 * pixel + 2] * lambda;
}
21,970
// Write a CUDA program to compute the sum of two arrays. Input: Number of elements in the array. Output: Array of sums
// Used the Error Handler function written by Dr. Rama in his Colab shared to us on google classroom
#include<stdio.h>
#include<stdlib.h>
#include<time.h>

#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
#define THREADS_PER_BLOCK 256

// __managed__ so the host (scanf) and the kernel (bounds check) share n.
__managed__ int n = 5;

// Prints the CUDA error and aborts.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err != cudaSuccess)
    {
        printf( "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}

// One thread per element. Uses the global index (not just threadIdx.x) so
// inputs larger than a single block are summed correctly.
__global__ void sumArrays(int *a, int *b, int *sum)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid < n)
    {
        sum[tid] = b[tid] + a[tid];
    }
}

int main()
{
    scanf("%d", &n);
    srand(time(0));

    int *a, *b, *sum;        // host buffers
    int *c_a, *c_b, *c_sum;  // device buffers

    a = (int *)malloc(n * sizeof(int));
    b = (int *)malloc(n * sizeof(int));
    sum = (int *)malloc(n * sizeof(int));

    HANDLE_ERROR(cudaMalloc((void **)&c_a, n * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void **)&c_b, n * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void **)&c_sum, n * sizeof(int)));

    for (int i = 0; i < n; i++)
    {
        a[i] = rand() % 1000;
        b[i] = rand() % 1000;
        // To see the elements, uncomment the next line
        // printf("%d %d\n", a[i], b[i]);
    }
    // puts(" ");

    HANDLE_ERROR(cudaMemcpy(c_a, a, n * sizeof(int), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(c_b, b, n * sizeof(int), cudaMemcpyHostToDevice));

    // Enough blocks to cover n elements: the original <<<1, n>>> launch
    // failed outright for n > 1024 (threads-per-block limit). At least one
    // block so n == 0 does not produce an invalid configuration.
    int blocks = (n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    if (blocks == 0) blocks = 1;
    sumArrays<<<blocks, THREADS_PER_BLOCK>>>(c_a, c_b, c_sum);
    HANDLE_ERROR(cudaGetLastError());  // catch launch-configuration errors
    cudaDeviceSynchronize();

    HANDLE_ERROR(cudaMemcpy(sum, c_sum, n * sizeof(int), cudaMemcpyDeviceToHost));

    for (int i = 0; i < n; i++)
    {
        printf("sum[%d] = %d\n", i, sum[i]);
    }
    puts(" ");

    free(a);
    free(b);
    free(sum);
    HANDLE_ERROR(cudaFree(c_a));
    HANDLE_ERROR(cudaFree(c_b));
    HANDLE_ERROR(cudaFree(c_sum));
    return 0;
}
21,971
// Vector addition on the GPU: fills two random host vectors, adds them
// element-wise on the device, and prints "a + b = c" for every index.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>

void llenaAleatorio(float arreglo[], int n);
void ImprimeArreglo(float arreglo[], float arreglo2[], float arreglo3[], int n);

// C[i] = A[i] + B[i], one thread per element, guarded against the grid
// overshooting N.
__global__ void VecAdd(float* A, float* B, float* C, int N){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i < N)
        C[i] = A[i] + B[i];
}

int main(void)
{
    int N = 1024;
    size_t size = N * sizeof(float);
    float * h_A = (float*)malloc(size);
    float * h_B = (float*)malloc(size);
    float * h_C = (float*)malloc(size);

    // Seed ONCE here. Seeding inside llenaAleatorio reused the same
    // time(NULL) value for both calls (same second), so A and B came out
    // identical.
    srand(time(NULL));

    // Initialize input vectors
    llenaAleatorio(h_A, N);
    llenaAleatorio(h_B, N);

    // Allocate vectors in device memory
    float* d_A;
    cudaMalloc(&d_A, size);
    float* d_B;
    cudaMalloc(&d_B, size);
    float* d_C;
    cudaMalloc(&d_C, size);

    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock -1)/threadsPerBlock;
    VecAdd<<<blocksPerGrid,threadsPerBlock>>>(d_A, d_B, d_C, N);

    // blocking copy — also synchronizes with the kernel
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    ImprimeArreglo(h_A, h_B, h_C, N);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
}

// Fills arreglo with n random values in [0, 100).
void llenaAleatorio(float arreglo[], int n)
{
    for(int i = 0; i < n; i++)
    {
        arreglo[i] = rand()%100;
    }
}

// Prints "a + b = c" for each of the n indices.
void ImprimeArreglo(float arreglo[], float arreglo2[], float arreglo3[], int n)
{
    for(int i = 0; i < n; i++)
    {
        printf("%f + %f = %f\n", arreglo[i], arreglo2[i], arreglo3[i]);
    }
}
21,972
// Element-wise sum of two N*N integer matrices: GPU kernel vs. a CPU
// reference, followed by a correctness comparison of the two results.
#include<cuda.h>
#include<stdio.h>
#include<string.h>  // memset (was used without being included)

void initializeArray(int*,int);
void stampaMatriceArray(int*, int, int);
void equalArray(int*, int*, int);
void sommaMatriciCompPerCompCPU(int *, int *, int *, int);
// kernel prototype
__global__ void sommaMatriciCompPerCompGPU(int*, int*, int*, int);

int main(int argn, char * argv[]){
    // grid size and an 8*4 = 32-thread block
    dim3 gridDim, blockDim(8,4);
    int N;                                // matrix side: total elements = N*N
    int *A_host, *B_host, *C_host;        // arrays on the host
    int *A_device, *B_device, *C_device;  // arrays on the device
    int *copy;                            // host copy of C_device for the check
    int size;                             // size in bytes of each array

    printf("***\t SOMMA COMPONENTE PER COMPONENTE DI DUE MATRICI \t***\n");
    printf("Inserisci il numero di elementi della matrice\n");
    scanf("%d",&N);

    // exact number of blocks: add one block when the division has a remainder
    gridDim.x = N / blockDim.x + ((N % blockDim.x) == 0 ? 0:1);
    gridDim.y = N / blockDim.y + ((N % blockDim.y) == 0 ? 0:1);

    // size in bytes of each array
    size = N*N*sizeof(int);

    // print kernel-launch info
    printf("Taglia della matrice N*N = %d * %d\n", N,N);
    printf("Numero di thread per blocco = %d\n", blockDim.x*blockDim.y);
    printf("Numero di blocchi = %d\n", gridDim.x*gridDim.y);

    // host allocations
    A_host=(int*)malloc(size);
    B_host=(int*)malloc(size);
    C_host=(int*)malloc(size);
    copy=(int*)malloc(size);

    // device allocations
    cudaMalloc((void**)&A_device,size);
    cudaMalloc((void**)&B_device,size);
    cudaMalloc((void**)&C_device,size);

    // initialize host data
    initializeArray(A_host, N*N);
    initializeArray(B_host, N*N);

    // copy inputs host -> device
    cudaMemcpy(A_device, A_host, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_device, B_host, size, cudaMemcpyHostToDevice);

    // zero the result buffers
    memset(C_host, 0, size);
    cudaMemset(C_device, 0, size);

    // kernel launch
    sommaMatriciCompPerCompGPU<<<gridDim, blockDim>>>(A_device, B_device, C_device, N*N);

    // copy the result device -> host
    cudaMemcpy(copy, C_device, size, cudaMemcpyDeviceToHost);

    // serial reference sum
    sommaMatriciCompPerCompCPU(A_host, B_host, C_host, N*N);

    // correctness check: host and device sums must coincide
    equalArray(C_host, copy, N*N);

    // host de-allocation
    free(A_host);
    free(B_host);
    free(C_host);
    free(copy);

    // device de-allocation
    cudaFree(A_device);
    cudaFree(B_device);
    cudaFree(C_device);

    exit(0);
}

// Fills array[i] = i.
void initializeArray(int *array, int n){
    int i;
    for(i=0;i<n;i++)
        array[i] = i;
}

// Debug helper: prints the matrix row by row.
// NOTE(review): the line break uses i%righe==colonne-1, mixing rows and
// columns — only consistent for square matrices; confirm intent before use.
void stampaMatriceArray(int* matrice, int righe, int colonne){
    int i;
    for(i=0;i<righe*colonne;i++){
        printf("%d \t", matrice[i]);
        if(i%righe==colonne-1)
            printf("\n");
    }
    printf("\n");
}

// Compares a and b element-wise. The original loop had no i<n bound, so it
// read past the end of both arrays whenever they were fully equal — i.e. in
// the expected success case.
void equalArray(int* a, int*b, int n){
    int i=0;
    while(i<n && a[i]==b[i])
        i++;
    if(i<n)
        printf("I risultati dell'host e del device sono diversi\n");
    else
        printf("I risultati dell'host e del device coincidono\n");
}

// Serial reference: c[i] = a[i] + b[i].
void sommaMatriciCompPerCompCPU(int *a, int *b, int *c, int n){
    int i;
    for(i=0;i<n;i++)
        c[i]=a[i]+b[i];
}

// Parallel version: each thread maps its 2D grid coordinates to a unique
// linear index (bijective over the grid), guarded by index < n.
__global__ void sommaMatriciCompPerCompGPU(int *a, int *b, int *c, int n){
    int i, j, index;
    i = blockIdx.x * blockDim.x + threadIdx.x;
    j = blockIdx.y * blockDim.y + threadIdx.y;
    index = j * gridDim.x * blockDim.x + i;
    if(index < n)
        c[index] = a[index]+b[index];
}
21,973
// Demonstrates device-side assert(): the second assert in the kernel fires
// and halts execution; the host then reports the resulting cudaErrorAssert.
#include <assert.h>
#include <stdio.h>

// assert() is only supported
// for devices of compute capability 2.0 and higher
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#undef assert
#define assert(arg)
#endif

__global__ void testAssert(void)
{
    int is_one = 1;
    int should_be_one = 0;

    // This will have no effect
    assert(is_one);

    // This will halt kernel execution
    assert(should_be_one);
}

int main(int argc, char* argv[])
{
    testAssert<<<1,1>>>();
    // The device assert surfaces as an error at the next synchronizing
    // call; the original returned 0 and silently swallowed it.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "Kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
21,974
// Experimental spiking-neural-network simulator: a persistent kernel
// (fcnCall, one 256-thread block used) spin-waits on a managed "clock"
// byte that the host increments, pops spiking neurons from a custom
// shared-memory queue, distributes "sodium" along an adjacency list built
// by getNeighbors(), and signals outputs back to the host through managed
// memory driving the SPIKY25 board from spiky25.cu.
// Left byte-identical below: the control flow (device-side busy-wait,
// host/device concurrent access to managed buffers, bit-packed active
// flags) is too order-dependent to restyle safely.
// NOTE(review) — concerns to confirm with the author:
//  - Neuron::init executes `*iSodium = 0;` while iSodium is never
//    assigned (the pointer setup lines above it are commented out).
//  - fcnCall's second init loop writes active_neurons[i] but iterates up
//    to sizeof(neuron_synapses) (10000 > 8192) — out-of-bounds shared
//    memory write.
//  - main allocates `output` (a char*) with no_output_neurons*sizeof(int)
//    (3 ints) yet both host and device index output[12..14].
//  - main reads/writes the managed `clock`, `input` and `output` buffers
//    while the kernel is running, with no synchronization — requires
//    concurrent managed access support; verify on the target device.
//  - Queue::push/pop use modular arithmetic on signed shorts with no
//    full/empty distinction; confirm capacity (500) is never exceeded.
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <string.h> #include "spiky25.cu" #define neurons_per_thread 50 #define no_input_neurons 12 #define no_output_neurons 3 #define clock_cycle 10 int **getNeighbors(char *file); //This is an optimised queue for this project struct Queue { private: unsigned short int *_queue; short int front; //Better for front and back to be signed, as modular arithmetic is more efficient short int back; unsigned short int size; public: unsigned short int neuron_count; public: __device__ void init(unsigned short int *sPtr, int queue_size, unsigned short int neuron) { _queue = sPtr; front = 0; back = 0; size = queue_size; _queue[front] = neuron; } __device__ short int push(unsigned short int neuron) { //printf("Pushing: %d\n", neuron); //printf(" "); back = (back - 1) % size; if(back < 0) back = size - 1; _queue[back] = neuron; return back; } __device__ unsigned short int sFront() { //printf("FRONT: %d - %d\n", front, _queue[front]); return _queue[front]; } __device__ unsigned short int pop() { if(front > back) { short int tmp = front; front = (front - 1) % size; if(front < 0) front = size - 1; return _queue[tmp]; } else if(front < back) { short int tmp = front; front = (front - 1) % size; if(front < 0) front = size - 1; return _queue[tmp]; } else { unsigned short int tmp = _queue[front]; _queue[front] = 0; return tmp; } } __device__ short int backPos() { return back; } __device__ short int frontPos() { return front; } }; //This is the neuron struct Neuron { private: unsigned short int *sharedPointer; int *oSodium, *iSodium; int noNodes; short int threshold; bool isSpiking = true; int **neighbors; struct Queue *queue; unsigned char *active_neurons; unsigned char *neuron_synapses; char *global_input; char *global_output; char *_clock; public: __device__ void init(unsigned short int *sPtr, int **_neighbors, struct Queue *_queue, unsigned char *_active_neurons, unsigned char 
*_neuron_synapses, char *input, char *output, char* clock) { //initialise variables neighbors = _neighbors; noNodes = neighbors[0][0]; threshold = 30; queue = _queue; active_neurons = _active_neurons; global_input = input; global_output = output; _clock = clock; neuron_synapses = _neuron_synapses; sharedPointer = sPtr;//Shared memory must be declared outside of the class, therefore we pass a pointer to it. if(blockDim.x * blockIdx.x + threadIdx.x > noNodes) { //If id of thread isSpiking = false; return; } //Initialise sodium concentrations //oSodium = &sharedPointer[2 * threadIdx.x + 0]; //iSodium = &sharedPointer[2 * threadIdx.x + 1]; //*oSodium = 0; *iSodium = 0; //Initialise neighbors } __device__ void run() { int x; x = threadIdx.x; printf("_%d_", x); if(blockIdx.x > 1) return; //if(blockDim.x * blockIdx.x + threadIdx.x > noNodes) return; short int sodium_c[neurons_per_thread]; short int rec[neurons_per_thread]; //Keeps record of synapse weight index. for(short int i = 0; i < neurons_per_thread; i++) sodium_c[i] = 0; unsigned int counter = 0; while(true) { clock_t start = clock(); clock_t now; for (;;) { if(_clock[0] < clock_cycle) { printf(""); break; } /*now = clock(); clock_t cycles = now > start ? now - start : now + (0xffffffff - start); if (cycles >= 10000000) { break; }*/ } if(threadIdx.x == 0 && blockIdx.x == 0) { printf("SPIKE"); _clock[0]++; } __syncthreads(); // Stored "now" in global memory here to prevent the compiler from optimizing away the entire loop. //printf("Start %d %d\n", threadIdx.x, now); //Grab global input if(threadIdx.x < no_input_neurons && ( global_input[threadIdx.x] == 1 )) { queue->push((unsigned int) threadIdx.x + 1); //printf("Global input recieved: %d \n", threadIdx.x + 1); } //printf("end %d\n", threadIdx.x); if(threadIdx.x == 0) printf("Queue size: %d", -queue->backPos() + queue->frontPos()); //First, determine which neurons are effected by the firing neuron, store this information in shared memory. 
Node will be removed from the queue later. unsigned short int node = queue->sFront(); if(node == 0) { //printf("No neurons\n"); continue; //No neurons are spiking, wait for a spiking neuron. (Neuron == 0) } int *destinations = neighbors[(int) node]; //These are the neurons the sodium is sent to int noNeighbors = destinations[0]; //destinations[0] ==> refers to number of neighbors //printf("Node: %d Dest: %d %d %d\n", node, destinations[1], destinations[2], destinations[3]); int synapses_per_thread = neurons_per_thread; //12000 synapses max, all neurons must be processed in the same block, therefore max 2048 threads available. for(int i = 0; i < synapses_per_thread; i++) { if(!(i + synapses_per_thread * threadIdx.x > noNeighbors)) { if(!(i + synapses_per_thread * threadIdx.x)) continue; int curr_neuron = destinations[synapses_per_thread * threadIdx.x + i]; active_neurons[(curr_neuron - curr_neuron % 8)/8] += 1 << (curr_neuron % 8); //Store the synapse weights in shared memory //if(synapses_per_thread * threadIdx.x + i < sizeof(neuron_synapses)) --> could check overflow. But if this happens, very bad so hopefully neural network won't be too big! 
neuron_synapses[synapses_per_thread * threadIdx.x + i] = (unsigned char) (destinations[(int) noNeighbors + synapses_per_thread * threadIdx.x + i]); printf("\nSynapse_ %d: %d\n", synapses_per_thread * threadIdx.x + i, neuron_synapses[synapses_per_thread * threadIdx.x + i]); //neurons_per_thread * 64 ==> Neurons per core //Record which neurons are to recieve sodium //This is a slow point which I tried very hard to avoid, uses up way to much memory and destroys efforts of the other parts of the program rec[ curr_neuron ] = synapses_per_thread * threadIdx.x + i; } else break; //syncthreads renders this unnecessary } //__syncthreads(); //Could be unecessary for(int i = 0; i < sizeof(neuron_synapses)/sizeof(*neuron_synapses); i++) { printf(" +=-%d", neuron_synapses[i]); } printf("+=-\n"); if(threadIdx.x == 0 && blockIdx.x == 0) { queue->pop(); //Remove the active node from the queue, so that we do not reprocess it in the future. counter++; //Increment the counter so that we can determine old neurons printf("Counter: %d\n", counter); } __syncthreads(); //Could be unecessary //Now each neuron process the sodium it recieves. We process more neurons per thread than synapses to maximise memory utilisation. if(!(neurons_per_thread * threadIdx.x > noNeighbors)) { for(short int i = 0; i < neurons_per_thread; i++) { short int curr_neuron = neurons_per_thread * threadIdx.x + i; if(!curr_neuron) continue; //destinations[(int) node + neurons_per_thread * threadIdx.x + i]; //printf("Curr: %d Active neurons: %d\n", curr_neuron, active_neurons[(int) (curr_neuron - curr_neuron % 8)/8] & 1 << (curr_neuron % 8)); //printf("Hex: %x\n", active_neurons[(curr_neuron - curr_neuron % 8)/8]); if(active_neurons[(int) (curr_neuron - curr_neuron % 8)/8] & 1 << (curr_neuron % 8)) { //printf("ACTIVE: %d\n", curr_neuron); //This algorithm should be modified to improve machine learning, but this can be done after some experimental data. 
sodium_c[i] += synapseStrenghthCalc(counter, neuron_synapses[ rec[ curr_neuron] ]); printf("ACTIVE: %d Strength: %d Sodium: %d Counter: %d\n", curr_neuron, neuron_synapses[ rec[ curr_neuron] ], sodium_c[i], counter); if(neuron_synapses[ rec[ curr_neuron] ] < 30) neuron_synapses[ rec[ curr_neuron] ]++; if(sodium_c[i] > threshold) { queue->push((unsigned short int) curr_neuron); printf("SPIKING: %d %d\n", curr_neuron, sodium_c[i]); if(curr_neuron >= no_input_neurons && curr_neuron < no_output_neurons + no_input_neurons) { printf("Setting global\n"); global_output[curr_neuron]++; if(curr_neuron == 12) printf("MOVING FORWARD: %d\n", global_output[curr_neuron]); } /*printf("rec: "); for(int j = 0; j < sizeof(rec); j++) { printf("%d ", neuron_synapses[(int) rec[ j ]]); } printf("\n");*/ sodium_c[i] = 0; } active_neurons[(curr_neuron - curr_neuron % 8)/8] -= 1 << (curr_neuron % 8); //Set it back to zero } } } //Reset everything for(int i = 0; i < synapses_per_thread; i++) { if(!(i + synapses_per_thread * threadIdx.x > destinations[0])) { int curr_neuron = destinations[synapses_per_thread * threadIdx.x + i]; active_neurons[(curr_neuron - curr_neuron % 8)/8] = 0b00000000; //Store the updated synapse weights in global memory destinations[(int) node + synapses_per_thread * threadIdx.x + i] = neuron_synapses[synapses_per_thread * threadIdx.x + i]; } } //__syncthreads(); //Could be unecessary } //printf("\nFINISHED?\n"); } __device__ void input(int sodium) { *oSodium += sodium; } __device__ void spiking() { if(*oSodium > threshold) { //send to neighbors } } __device__ unsigned char synapseStrenghthCalc(unsigned int counter, unsigned char synapseWeight) { return synapseWeight < 30 ? 
synapseWeight : 30; //printf("Counter: %d vs Weight: %d\n", counter, synapseWeight); if(counter < synapseWeight) { if(synapseWeight > counter) return (synapseWeight - counter < 10)?synapseWeight - counter:10; else return 1; } else { if(256 - counter + synapseWeight > 0) return (256 - counter + synapseWeight)?256 - counter + synapseWeight:10; else return 1; } } }; __global__ void fcnCall(struct Neuron *nvidia, int **neighbors, struct Queue *queue, char *input, char *output, char* clock) { //Max memory is 48KB, a short int is 2Bytes. Therefore, 24000 total of short ints available in shared memory. Reserve every single byte. __shared__ unsigned short int nQueue[ 14904 ]; //Memory for queue of currently spiking neurons. Make as big as possible. __shared__ unsigned char active_neurons[ 8192 ]; //65536 bits, 65536 unique neurons per block. Char has twice as many bits as a short int. __shared__ unsigned char neuron_synapses[ 10000 ]; //10000 synayses is the max number of synapses to be expected. The strength of a synapse is represented by one byte, therefore reserve 10000 bytes for max 10000 synapses int x; x = threadIdx.x; printf("_%d_", x); for(int i = 0; i < sizeof(active_neurons); i++) { active_neurons[i] = 0b00000000; //initialise active neuron array. 
} for(int i = 0; i < sizeof(neuron_synapses); i++) { active_neurons[i] = 0b00000000; //initialise synapse array (probably not necessary) } if(threadIdx.x == 0 && blockIdx.x == 0) { queue->init(nQueue, 500, 1); queue->push((unsigned short int) 5); } __syncthreads(); nvidia->init(nQueue, neighbors, queue, active_neurons, neuron_synapses, input, output, clock); nvidia->run(); return; } int main(void) { int **neighbors = getNeighbors("network"); //Preparing these classes needs a wrapper struct Neuron *_neuron = (struct Neuron*) malloc(sizeof(*_neuron)); struct Queue *_queue = (struct Queue*) malloc(sizeof(*_queue)); struct Neuron *neuron; struct Queue *queue; char *input; char *output; char *clock; cudaMallocManaged(&input, no_input_neurons * sizeof(char)); for (int i = 0; i < no_input_neurons; i++) input[i] = 0; cudaMallocManaged(&output, no_output_neurons * sizeof(int)); for (int i = 0; i < no_output_neurons; i++) output[i] = 0; cudaMallocManaged(&clock, 2 * sizeof(char)); cudaMalloc(&neuron, sizeof(*_neuron)); cudaMemcpy(neuron, _neuron, sizeof(*_neuron), cudaMemcpyHostToDevice); cudaMalloc(&queue, sizeof(*_queue)); cudaMemcpy(queue, _queue, sizeof(*_queue), cudaMemcpyHostToDevice); //Calls the function which calls the neuron fcnCall<<<1, 256>>>(neuron, neighbors, queue, input, output, clock); SPIKY25 spiky; spiky.init(); int count = 0; clock[0] = 1; for(int i = 0; ; i++) { if(clock[0] > clock_cycle - 1) { int touch = spiky.touch(); int **vision = spiky.vision(); for(int j = 0; j < no_input_neurons; j++) { input[j] = 0; } if(true) { if(touch > 0) { input[9] = 1; printf("Sending touch\n"); } else if (touch < 0) { input[10] = 1; printf("Sending pain\n"); } } else printf("NO PAIN"); int j = 0; for(int a = 0; a < 3; a++) { for(int b = 0; b < 3; b++) { if(vision[a][b]) { input[j] = 1; printf("sending vision\n"); } j++; } } if(output[12] > 0) printf("REALLY ALERT MOVE: %d\n", output[12]); spiky.move(output[12] > 0 ? 1 : 0, output[13] > 0 ? 2 : 0, output[14] > 0 ? 
1 : 0); output[12] = 0; output[13] = 0; output[14] = 0; spiky.printBoard(); printf("Count: %d\n", count); count++; if(count > 10000) exit(EXIT_SUCCESS); clock[0] = 0; } } //cudaMemcpy(_neuron, neuron, sizeof(*_neuron), cudaMemcpyDeviceToHost); //cudaMemcpy(_queue, queue, sizeof(*_queue), cudaMemcpyDeviceToHost); cudaFree(neuron); free(_neuron); cudaFree(queue); free(_queue); return 0; } /* * Returns pointer to adjacency list mapping out all of the neurons in the network. * Adjacency list is in the form of a jagged array, with each jagged array starting with a header int to describe array length. * First element [0][0] describes number of nodes in array. */ int **getNeighbors(char *file) { //Read in neural network char buff[255]; FILE *fp = fopen(file, "r"); char *line = NULL; size_t len = 0; ssize_t read; int noLines = 0; int **neighbors; noLines = 0; //first line is the header line, reads number of nodes. bool header = true; int i = 1; int j = 0; while( (read = getline(&line, &len, fp)) != -1) { char *nodes = strtok(line, " "); int noNodes = 0; int nodeList[BUFSIZ]; bool first = true; j = 0; int *temp; int *host_temp; //for cuda malloc copy while(nodes) { if(header) { int noLines = atoi(nodes); cudaMalloc( (void***) (&neighbors), sizeof(int*) * (noLines + 1) ); cudaMalloc((void**) &(temp), sizeof(int) ); //cudaMalloc((void**) (&neighbors[0][0]), sizeof(int) ); cudaMemcpy(temp, &noLines, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(neighbors + 0, &temp, sizeof(int*), cudaMemcpyHostToDevice); //cudaMallocManaged((void**) neighbors, sizeof(int*) * (noLines + 1)); //cudaMallocManaged((void*) (&neighbors[0][0]), sizeof(int) ); //neighbors = (int**) malloc(noLines * sizeof(int*) + 1); //Add one for header int describing array length //neighbors[0] = (int*) malloc(sizeof(int) ); //neighbors[0][0] = noLines; //record in header length of array header = false; nodes = strtok(NULL, " "); break; } else if(first) { noNodes = atoi(nodes); //free(host_temp); 
//cudaFree(temp); host_temp = (int*) malloc(sizeof(int) * 2 * noNodes + 1); //Add one for header int describing array length. Multiply number of nodes by 2 as list includes synapse weights. cudaMalloc( (void**) &(temp), sizeof(int) * ( 2 * noNodes + 1) ); host_temp[0] = noNodes; //cudaMallocManaged((void*) neighbors[i], sizeof(int) * (noNodes + 1)); //Add one for header int describing array length //neighbors[i] = (int*) malloc(sizeof(int) * noNodes + 1); //Add one for header int describing array length //neighbors[i][0] = noNodes; //record in header length of array j++; nodes = strtok(NULL, " "); first = false; continue; } else { int node = atoi(nodes); //cudaMemcpy(temp[j], node, sizeof(int), cudaMemcpyHostToDevice); host_temp[j] = node; //neighbors[i][j] = node; //printf(" %d %d ", i, j); j++; nodes = strtok(NULL, " "); } //Copy neighbor row into GPU for given node if(j == 2 * noNodes + 1) { cudaMemcpy(temp, host_temp, sizeof(int) * j, cudaMemcpyHostToDevice); cudaMemcpy(neighbors+i, &temp, sizeof(int*), cudaMemcpyHostToDevice); j = 0; i++; } } } return neighbors; }
21,975
#include <iostream>
#include <vector>

// Demo kernel: each thread writes its own thread index into v.
// NOTE(review): `size` is never checked -- a launch wider than the buffer
// (100 ints here) would write out of bounds if the launch succeeded.
__global__ void fill( int * v, std::size_t size )
{
    auto tid = threadIdx.x;
    v[ tid ] = tid;
}

// Error-handling demo: the launch below requests 1025 threads per block,
// which exceeds the 1024-threads-per-block hardware limit, so the launch
// fails and the error-retrieval calls that follow report it.
int main()
{
    std::vector< int > v( 100 );
    int * v_d = nullptr;
    cudaMalloc( &v_d, v.size() * sizeof( int ) );
    fill<<< 1, 1025 >>>( v_d, v.size() );
    cudaDeviceSynchronize();
    auto err2 = cudaGetLastError();
    if(err2!= cudaSuccess)
    {
        std::cout<<cudaGetErrorString(err2);
    }
    // Retrieve the kernel's error code in case of a crash.
    cudaDeviceSynchronize(); // Wait for the kernel to finish executing.
    cudaError err = cudaGetLastError();
    if( err != cudaSuccess )
    {
        std::cerr << cudaGetErrorString( err ); // retrieve the message associated with the error code.
    }
    // Retrieve the error code for synchronous CUDA functions.
    err = cudaMemcpy( v.data(), v_d, v.size() * sizeof( int ), cudaMemcpyDeviceToHost );
    if( err != cudaSuccess )
    {
        std::cerr << cudaGetErrorString( err ); // retrieve the message associated with the error code.
    }
    for( auto x: v )
    {
        std::cout << x << std::endl;
    }
    return 0;
}
21,976
#include <iostream>
#include "bounding_box.cuh"

// Smoke test for BoundingBox::contains(): query one point near the origin
// and one far away, printing each membership result.
int main() {
    BoundingBox box;

    const float2 nearPoint = make_float2(0.5f, 0.5f);
    const float2 farPoint  = make_float2(10.0f, 10.0f);

    std::cout << "Does point 1 lie in the box? " << box.contains(nearPoint) << "" << std::endl;
    std::cout << "Does point 2 lie in the box? " << box.contains(farPoint) << "" << std::endl;
}
21,977
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>

// CUDA kernel for vector addition.
// __global__ means this is called from the CPU and runs on the GPU.
__global__ void vectorAdd(const int *__restrict a, const int *__restrict b,
                          int *__restrict c, int N) {
  // Global 1D thread index.
  int tid = (blockIdx.x * blockDim.x) + threadIdx.x;

  // Boundary check: the grid may contain more threads than elements.
  if (tid < N) c[tid] = a[tid] + b[tid];
}

// Host-side check: every element of c must equal a + b element-wise.
void verify_result(std::vector<int> &a, std::vector<int> &b,
                   std::vector<int> &c) {
  for (int i = 0; i < a.size(); i++) {
    assert(c[i] == a[i] + b[i]);
  }
}

int main() {
  // bitwise left shift, comes out to 2^16
  constexpr int N = 1 << 16;
  std::cout << "value of N = " << N;
  constexpr size_t bytes = sizeof(int) * N;

  // Inputs: reserve up front so push_back never reallocates.
  std::vector<int> a;
  a.reserve(N);
  std::vector<int> b;
  b.reserve(N);
  // FIX: the output vector must be value-constructed with size N.
  // The original used reserve(N), which leaves size() == 0, so copying into
  // c.data() and indexing c[i] in verify_result was undefined behavior.
  std::vector<int> c(N);

  // Initialize random numbers in each array, between 0 and 100.
  for (int i = 0; i < N; i++) {
    a.push_back(rand() % 100);
    b.push_back(rand() % 100);
  }

  // Allocate memory on the device.
  int *d_a, *d_b, *d_c;
  cudaMalloc(&d_a, bytes);
  cudaMalloc(&d_b, bytes);
  cudaMalloc(&d_c, bytes);

  // Copy data from the host to the device (CPU -> GPU); synchronous calls.
  cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b.data(), bytes, cudaMemcpyHostToDevice);

  // Threads per CTA (1024).
  int NUM_THREADS = 1 << 10;

  // CTAs per grid: launch at LEAST as many threads as there are elements.
  // The ceiling division pads an extra CTA when N is not a multiple of
  // NUM_THREADS (e.g. N = 1025, NUM_THREADS = 1024).
  int NUM_BLOCKS = (N + NUM_THREADS - 1) / NUM_THREADS;
  std::cout << "\nvalue of NUM_BLOCKS = " << NUM_BLOCKS;

  vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, N);

  // cudaMemcpy is synchronous and the prior launch is on the same (default)
  // stream, so this copy also acts as the synchronization barrier.
  cudaMemcpy(c.data(), d_c, bytes, cudaMemcpyDeviceToHost);

  // CPU-side correctness check.
  verify_result(a, b, c);

  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);

  std::cout << "\nVector addition complete";
  return 0;
}
21,978
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
// nvcc -o CudaPasswordCracking CudaPasswordCracking.cu

// Compare `crack` against four hard-coded passwords.
// Returns 1 when `crack` exactly matches any of them, 0 otherwise.
__device__ int passcrack(char *crack){
    char pass1[]="SH2973";
    char pass2[]="KR3097";
    char pass3[]="PK9736";
    char pass4[]="BM4397";
    char *s1 = crack;
    char *s2 = crack;
    char *s3 = crack;
    char *s4 = crack;
    char *p1 = pass1;
    char *p2 = pass2;
    char *p3 = pass3;
    char *p4 = pass4;
    // Each loop walks candidate and password in lockstep; reaching the
    // terminator together means a full match.
    while(*s1 == *p1){
        if(*s1 == '\0'){
            return 1;
        }
        s1++; p1++;
    }
    while(*s2 == *p2){
        if(*s2 == '\0'){
            return 1;
        }
        s2++; p2++;
    }
    while(*s3 == *p3){
        if(*s3 == '\0'){
            return 1;
        }
        s3++; p3++;
    }
    while(*s4 == *p4){
        if(*s4 == '\0'){
            return 1;
        }
        s4++; p4++;
    }
    return 0;
}

// Brute-force search over AA0000..ZZ9999: blockIdx.x picks the first letter,
// threadIdx.x the second, and the nested loops enumerate the four digits.
__global__ void kernel() {
    char alphabet[26] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
    char num[10] = {'0','1','2','3','4','5','6','7','8','9'};
    char crack[7];
    crack[6] = '\0';
    int s, h, k, r;
    for(s=0;s<10;s++){
        for(h=0; h<10; h++){
            for(k=0; k<10; k++){
                for(r=0; r<10; r++){
                    crack[0] = alphabet[blockIdx.x];
                    crack[1] = alphabet[threadIdx.x];
                    crack[2] = num[s];
                    crack[3] = num[h];
                    crack[4] = num[k];
                    crack[5] = num[r];
                    if(passcrack(crack)){
                        printf("Password successfully cracked: %s\n", crack);
                    }
                }
            }
        }
    }
}

// Computes finish - start in nanoseconds into *difference.
// Returns 0 when the difference is positive, nonzero otherwise.
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) {
    long long int ds = finish->tv_sec - start->tv_sec;
    long long int dn = finish->tv_nsec - start->tv_nsec;
    if(dn < 0 ) {
        ds--;
        dn += 1000000000;
    }
    *difference = ds * 1000000000 + dn;
    return !(*difference > 0);
}

int main(int argc, char *argv[]) {
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    kernel <<<26, 26>>>();
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported replacement with identical blocking semantics.
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
    return 0;
}
21,979
#include <iostream>
#include "../include/lglist.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
using namespace std;

// Single-thread exercise of gpu_linearized_stl::list: capacity queries,
// push_back, insert, find/erase, reverse and pops, writing a trace of
// observed values into `output` (10086 is used as a section separator).
__global__ void test(float *output){
    gpu_linearized_stl::list<float,100> list;
    int idx = 0;
    // Capacity/emptiness before any insertions.
    output[idx++] = list.max_size();
    output[idx++] = list.full();
    output[idx++] = (float)list.empty();
    output[idx++] = (float)list.size();
    // Append 10 values, recording emptiness/size after each push.
    for(int i=0;i<10;++i){
        list.push_back(i*1.7);
        output[idx++] = (float)list.empty();
        output[idx++] = (float)list.size();
    }
    // Interior insertions: 55 before the last element, then 77 two slots back.
    for(int i=0;i<6;++i)
    {
        int a = list.end();
        auto p = list.insert(list.decrement(a), 55);
        list.decrement(p);
        list.decrement(p);
        list.insert(p, 77);
    }
    // Dump current contents plus front/back.
    for(auto p=list.begin(); p!=list.end();list.increment(p)) output[idx++] = list.at(p);
    output[idx++] = list.front();
    output[idx++] = list.back();
    // Remove every 77 via repeated find/erase.
    int p;
    while((p=list.find(77))!=list.end()){
        list.erase(p);
    }
    output[idx++] = 10086;
    for(auto p=list.begin(); p!=list.end();list.increment(p)) output[idx++] = list.at(p);
    // In-place reversal.
    list.reverse();
    output[idx++] = 10086;
    for(auto p=list.begin(); p!=list.end();list.increment(p)) output[idx++] = list.at(p);
    // Pop from both ends.
    list.pop_front();
    list.pop_back();
    list.pop_back();
    output[idx++] = 10086;
    for(auto p=list.begin(); p!=list.end();list.increment(p)) output[idx++] = list.at(p);
}

// Launch the single-thread test and print the recorded trace.
int main(){
    def_dvec(float) dev_out(150, 0);
    test<<<1, 1>>>(to_ptr(dev_out));
    // Reading the device_vector synchronizes with the kernel.
    for(auto k:dev_out) cout<<k<<' ';
    cout<<endl;
    return 0;
}
21,980
#include <cuda.h>
#include <stdio.h>

// Each device thread prints a greeting.
__global__ void simpleKernel()
{
    printf("Hello World!\n");
}

int main()
{
    // One block of four threads -> four greetings.
    const int threadsPerBlock = 4;
    simpleKernel<<<1, threadsPerBlock>>>();
    // Block until the kernel finishes so its printf output is flushed.
    cudaDeviceSynchronize();
    return 0;
}
21,981
#include "includes.h"

// Divisive normalization of a motion-energy buffer, in place.
// One thread per buffer element; `n` guards the grid tail.
// NOTE(review): the buffer is read (neighbors) and written (own element)
// concurrently by different threads with no double-buffering -- results may
// depend on scheduling; confirm this is intended.
__global__ void kernelNormalizeMotionEnergyAsync(int bsx, int bsy, int n, float alphaPNorm, float alphaQNorm, float betaNorm, float sigmaNorm, float* gpuEnergyBuffer)
{
    int bufferPos = threadIdx.x + blockIdx.x * blockDim.x;
    float sigmaNorm2_2 = 2*sigmaNorm*sigmaNorm;  // 2*sigma^2, Gaussian denominator
    if(bufferPos < n)
    {
        int bx,by;
        // NOTE(review): bxy divides by bsx*bsy (whole-plane size), then takes
        // % bsx and / bsx -- this yields plane indices, not per-pixel (x, y)
        // coordinates within a plane; verify the intended coordinate mapping.
        int bxy = bufferPos / (bsx*bsy);
        bx = bxy % bsx;
        by = bxy / bsx;
        // Read energy
        float I = gpuEnergyBuffer[bufferPos];
        float q_i = 0;
        // Normalize over 5x5 region
        for(int y = -2; y <= 2; y++)
        {
            int by_ = by + y;
            if(by_ < 0 || by_ >= bsy) continue;
            for(int x = -2; x <= 2; x++)
            {
                int bx_ = bx + x;
                // Skip out-of-range columns and the center element itself.
                if(bx_ < 0 || bx_ >= bsx || (bx == bx_ && by == by_)) continue;
                // TODO
                // Each thread computes the same
                // NOTE(review): the Gaussian weight uses the ABSOLUTE
                // coordinates (bx_, by_) rather than the offsets (x, y) from
                // the center -- a distance-weighted kernel would use the
                // offsets; confirm which is intended.
                float gaus = 1/(sigmaNorm2_2*M_PI)* exp(-(bx_*bx_ + by_*by_)/sigmaNorm2_2);
                // TODO Use shared memory to avoid extra global memory access
                q_i += gpuEnergyBuffer[by_*bsx+bx_]*gaus;
            }
        }
        q_i /= alphaQNorm;
        // Compute p_i: divisively normalized response.
        float p_i = (I*betaNorm)/(alphaPNorm + I + q_i);
        // Use normalized value
        gpuEnergyBuffer[bufferPos] = p_i;
    }
}
21,982
#include <stdio.h>
#include <time.h>

// Writes one Leibniz pair per thread: a[id] = 1/(4*id+1) - 1/(4*id+3).
// Summing all elements approximates pi/4.
__global__ void ken(double *a)
{
    int id=blockIdx.x*blockDim.x+threadIdx.x;
    a[id]=pow((double)(4*id+1),-1)-pow((double)(4*id+3),-1);
}

// One pairwise-reduction step: b[id] = a[2*id] + a[2*id+1] for the first
// ceil(count/2) ids, then thread 0 stores the halved count back to *dcount.
// NOTE(review): every thread reads *dcount while thread 0 of an earlier-
// finishing block may already have written the new value -- a potential
// cross-block race; confirm the halving stays in lockstep with the host loop.
__global__ void ken2(double *a,double *b,int *dcount)
{
    int id=blockIdx.x*blockDim.x+threadIdx.x;
    int count=*dcount;
    // Ceiling division by 2 (odd counts keep the unpaired element).
    if(count%2==0)
    {
        count=count/2;
    }
    else
    {
        count=count/2+1;
    }
    if(id<count)
    {
        b[id]=a[id*2]+a[id*2+1];
        a[id*2]=0.0;
        a[id*2+1]=0.0;
    }
    if(id==0) *dcount=count;
}

// Host driver: fill the series terms, repeatedly halve via ken2 (the host
// mirrors the device's count-halving), then print 4*a[0] ~= pi.
int main()
{
    clock_t t1, t2;
    int block =50000;
    int thread=300;
    int count=block*thread;
    int size=block*thread;
    int *dcount;
    double *a,*da,*db;
    a=(double*)malloc(size*sizeof(double));
    cudaMalloc((void**)&da,size*sizeof(double));
    cudaMalloc((void**)&db,size*sizeof(double));
    cudaMalloc((void**)&dcount,sizeof(int));
    t1 = clock();
    cudaMemcpy(dcount,&count,sizeof(int),cudaMemcpyHostToDevice);
    ken<<<block,thread>>>(da);
    while(count>1)
    {
        // Host-side halving must match ken2's device-side halving exactly.
        if(count%2==0) count=count/2;
        else count=count/2+1;
        ken2<<<block,thread>>>(da,db,dcount);
        cudaDeviceSynchronize();
        // Full-size device-to-device copy; only the first `count` elements
        // are meaningful for the next iteration.
        cudaMemcpy(da,db,size*sizeof(double),cudaMemcpyDeviceToDevice);
    }
    cudaMemcpy(a,da,size*sizeof(double),cudaMemcpyDeviceToHost);
    t2 = clock();
    printf("\na[0]=%.8lf\n size=%d\n",4*a[0],size*2);
    printf("%lf\n", (t2-t1)/(double)(CLOCKS_PER_SEC));
    return 0;
}
21,983
#include <iostream>
#include <math.h>
#include <stdio.h>

// Element-wise addition y[i] = x[i] + y[i] over n floats.
// Grid-stride loop: any grid size covers the whole array.
__global__ void add(int n, float *x, float *y)
{
  int first = blockIdx.x * blockDim.x + threadIdx.x;
  int step  = blockDim.x * gridDim.x;
  for (int i = first; i < n; i += step)
    y[i] = x[i] + y[i];
}

int main(void)
{
  const int N = 1 << 20;  // 1M elements
  const int blockSize = 256;
  const int numBlocks = (N + blockSize - 1) / blockSize;  // ceiling division

  // Host buffers plus separate device buffers, copied explicitly.
  float *x = (float *)malloc(N * sizeof(float));
  float *y = (float *)malloc(N * sizeof(float));
  float *d_x, *d_y;
  cudaMalloc(&d_x, N * sizeof(float));
  cudaMalloc(&d_y, N * sizeof(float));

  // Fill the inputs: x = 1 and y = 2 everywhere.
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);

  add<<<numBlocks, blockSize>>>(N, d_x, d_y);

  // This blocking copy also synchronizes with the kernel launch above.
  cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);

  // Every element should now be 3.0f; report the largest deviation.
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i] - 3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  cudaFree(d_x);
  cudaFree(d_y);
  free(x);
  free(y);

  return 0;
}
21,984
#include<iostream>
#include<cstdio>
using namespace std;

// Each device thread prints its thread index.
__global__ void printDevice()
{
    int x;
    x = threadIdx.x;
    printf(" Thread %d says Hello\n", x);
}

int main()
{
    printDevice<<<2,10>>>();
    // FIX: without synchronization the host can exit before the kernel runs
    // and its printf buffer is flushed, silently losing all output.
    cudaDeviceSynchronize();
    return 0;
}
21,985
#include <stdio.h>

// Print the CUDA error with its source location and abort the process.
static void HandleError( cudaError_t err, const char *file, int line ) {
    if (err != cudaSuccess) {
        ::printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
        ::exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define N 100000
// Alternative checker: reads (and clears) the last sticky error and aborts on failure.
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n",msg, cudaGetErrorString(__err), __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

// One element per BLOCK (launched as <<<N,1>>> below): tid is the block index.
__global__ void add( int *a, int *b, int *c ) {
    int tid = blockIdx.x;    // this thread handles the data at its thread id
    if (tid < N) c[tid] = a[tid] + b[tid];
}

// Vector-add experiment with explicit error checking at each step.
// NOTE(review): a, b, c are 100000-int stack arrays (~1.2 MB total) -- close
// to the default stack limit on some platforms; confirm this is safe here.
extern "C" void Cuda_Main_Test1(){
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // allocate the memory on the GPU
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) );
    // cudaCheckErrors("cudamalloc fail");
    // fill the arrays 'a' and 'b' on the CPU
    for (int i=0; i<N; i++) {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    // copy the arrays 'a' and 'b' to the GPU
    HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice ) );
    //HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice ) );
    cudaError_t status = cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
    // cudaCheckErrors("cuda memcpy fail");
    //sleep(1);
    // One block per element, one thread per block.
    add<<<N,1>>>(dev_a,dev_b,dev_c);
    // sleep(1);
    // copy the array 'c' back from the GPU to the CPU
    HANDLE_ERROR( cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost ) );
    // if (status == cudaSuccess) {
    //     printf( "0 CUDA Error: %s \n", cudaGetErrorString( status ) );
    // }
    // if (status != cudaSuccess) {
    //     printf( "1 CUDA Error: %s \n", cudaGetErrorString( status ) );
    //     // cout << "CUDA Error: " << cudaGetErrorString( cuerr ) << endl
    // }
    // cudaCheckErrors("cudamemcpy or cuda kernel fail");
    // sleep(1);
    // display the results
    for (int i=0; i<N; i++) {
        // printf( "%d + %d = %d\n", a[i], b[i], c[i] );
        printf( "%d%c ", c[i],((i%8==7)?'\n':' '));
    }
    printf( "Start CPU add \n ");
    // CPU reference computation (overwrites c in place).
    for (int i=0; i<N; i++) {
        c[i]=a[i]+b[i];
        //printf( "%d%c ", c[i],((i%8==7)?'\n':' '));
    }
    printf( "Stop CPU add \n ");
    // free the memory allocated on the GPU
    HANDLE_ERROR( cudaFree( dev_a ) );
    HANDLE_ERROR( cudaFree( dev_b ) );
    HANDLE_ERROR( cudaFree( dev_c ) );
}
21,986
/**
 * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

/**
 * Matrix-vector multiplication sample: A[N][N] * B[N] = C[N],
 * with a CPU reference and several kernel variants.
 */

#include <stdio.h>
#include <math.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

/**
 * CPU reference: C = A * B where A is N x N (row-major) and B has length N.
 */
void matvec(float * A, float * B, float * C, int N)
{
    int i, j;
    for (i=0; i<N; i++) {
        float temp = 0.0;
        for (j=0; j<N; j++)
            temp += A[i*N+j] * B[j];
        C[i] = temp;
    }
}

// One thread per output row; each thread reads B from global memory.
__global__ void matvec_kernel(float * A, float * B, float * C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j;
    if (i < N) {
        float temp = 0.0;
        for (j=0; j<N; j++)
            temp += A[i*N+j] * B[j];
        C[i] = temp;
    }
}

/**
 * Variant with B staged in shared memory.
 * Hard-coded for N = 1024 with 4 blocks of 256 threads; requires
 * N*sizeof(float) bytes of dynamic shared memory.
 */
__global__ void matvec_kernel_shared(float * A, float * B, float * C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x; /* 0 - 1023 */
    int j;
    extern __shared__ float B_shared[];
    // Each block cooperatively fills all 1024 entries of B_shared.
    B_shared[i] = B[i];
    B_shared[(i+256)%1024] = B[(i+256)%1024];
    B_shared[(i+512)%1024] = B[(i+512)%1024];
    B_shared[(i+768)%1024] = B[(i+768)%1024];
    __syncthreads();
    if (i < N) {
        float temp = 0.0;
        for (j=0; j<N; j++)
            temp += A[i*N+j] * B_shared[j];
        C[i] = temp;
    }
}

/**
 * Shared-memory variant generalized over gridDim.x; still requires
 * N*sizeof(float) bytes of dynamic shared memory.
 */
__global__ void matvec_kernel_shared_general(float * A, float * B, float * C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j;
    extern __shared__ float B_shared[];
    int k;
    for (k=0; k<gridDim.x; k++) {
        B_shared[(threadIdx.x + k*blockDim.x)%N] = B[(threadIdx.x + k*blockDim.x)%N];
    }
    __syncthreads();
    if (i < N) {
        float temp = 0.0;
        for (j=0; j<N; j++)
            temp += A[i*N+j] * B_shared[j];
        C[i] = temp;
    }
}

// CPU vector addition reference (unused by main; kept for comparison).
void vectorAdd_sequential(const float *A, const float *B, float *C, int numElements)
{
    int i;
    for (i=0; i < numElements; i++) {
        C[i] = A[i] + B[i];
    }
}

// GPU vector addition (unused by main; kept for comparison).
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements) {
        C[i] = A[i] + B[i];
    }
}

/**
 * Host main routine: run matvec_kernel and verify against the CPU reference.
 */
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    int numElements = 1024;
    size_t vecBytes = numElements * sizeof(float);
    // FIX: the matrix holds numElements*numElements floats. The original
    // allocated size*size BYTES (16x too much) and initialized only the
    // first numElements entries, so the kernel read uninitialized memory.
    size_t matBytes = (size_t)numElements * numElements * sizeof(float);
    printf("[Matrix-vector multiply of %d elements]\n", numElements);

    float *h_A = (float *)malloc(matBytes);   // N x N matrix
    float *h_B = (float *)malloc(vecBytes);   // input vector
    float *h_C = (float *)malloc(vecBytes);   // GPU result
    float *h_ref = (float *)malloc(vecBytes); // CPU reference result

    // Initialize the FULL matrix and the input vector.
    for (int i = 0; i < numElements * numElements; ++i)
        h_A[i] = rand()/(float)RAND_MAX;
    for (int i = 0; i < numElements; ++i)
        h_B[i] = rand()/(float)RAND_MAX;

    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, matBytes);
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, vecBytes);
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, vecBytes);

    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, matBytes, cudaMemcpyHostToDevice);
    err = cudaMemcpy(d_B, h_B, vecBytes, cudaMemcpyHostToDevice);

    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    // vecBytes of dynamic shared memory is only needed by the _shared kernel
    // variants; it is harmless for the plain kernel.
    matvec_kernel<<<blocksPerGrid, threadsPerBlock, vecBytes>>>(d_A, d_B, d_C, numElements);
    err = cudaGetLastError();

    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, vecBytes, cudaMemcpyDeviceToHost);

    // FIX: verify against a CPU matvec reference. The original compared
    // h_A[i] + h_B[i] (a vector-add identity) against a matvec result.
    // A relative-plus-absolute tolerance absorbs float accumulation error
    // over 1024 terms.
    matvec(h_A, h_B, h_ref, numElements);
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs(h_ref[i] - h_C[i]) > 1e-3f * fabs(h_ref[i]) + 1e-3f)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test PASSED\n");

    // Free device global memory
    err = cudaFree(d_A);
    err = cudaFree(d_B);
    err = cudaFree(d_C);

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_ref);

    // cudaDeviceReset flushes profiling data and cleans up all device state.
    err = cudaDeviceReset();
    printf("Done\n");
    return 0;
}
21,987
#include "includes.h"

// Fill the first N elements of `vector` with `val`; one thread per element.
__global__ void init(int *vector, int N, int val)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;  // grid tail guard
    vector[idx] = val;
}
21,988
// Copy one pointer per thread from inputPointers to outputPointers.
// `numPointers` guards the grid tail.
extern "C" __global__ void testKernel(
    float **inputPointers,
    float **outputPointers,
    int numPointers)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= numPointers)
        return;
    outputPointers[gid] = inputPointers[gid];
}
21,989
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdio.h>
#include<string.h>

// For each character, reverse the decimal digits of its character code
// (e.g. 97 -> 79) and store the result back; one thread per character.
__global__ void convert(char *s, int n)
{
    int id, r = 0, k = 0;
    id = threadIdx.x;
    int z;
    z = s[id];
    // Peel decimal digits off z and rebuild them in reverse order in k.
    while (z > 0)
    {
        r = z % 10;
        z = z / 10;
        k = k * 10 + r;
    }
    s[id] = (char)k;
}

int main(void)
{
    int n, i;
    char s[100];
    printf("Enter the string.\n");
    // FIX: the original scanf("%s\n", s) blocked waiting for extra
    // non-whitespace input because of the trailing "\n", and the unbounded
    // %s could overflow the 100-byte buffer. %99s bounds the read and
    // leaves room for the terminator.
    scanf("%99s", s);
    n = strlen(s);
    //printf("%d",n);
    int size;
    size = sizeof(char);
    char *d_s;
    cudaMalloc((void**)&d_s, n * size);
    cudaMemcpy(d_s, s, n * size, cudaMemcpyHostToDevice);
    // One block, one thread per character.
    convert<<<1, n>>>(d_s, n);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(s, d_s, n * size, cudaMemcpyDeviceToHost);
    printf("\n");
    for (i = 0; i < n; i++)
        printf("%c", s[i]);
    cudaFree(d_s);
    return 0;
}
21,990
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>

// Row-major matrix: elements[row * width + col].
typedef struct{
    int width;
    int height;
    float* elements;
} Matrix;

#define BLOCK_SIZE 3

__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Host wrapper: copies A and B to the device, launches the kernel and
// copies the product back into C. All matrices are assumed to be the same
// (square) shape, so one `size` covers every buffer.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width*A.height*sizeof(float);
    //allocate memory for matrix A on device
    cudaMalloc(&d_A.elements,size);
    //copy matrix A elements to memory allocated on device
    cudaMemcpy(d_A.elements, A.elements,size,cudaMemcpyHostToDevice);
    //do the same thing for matrix B (same dimensions as A, so reuse `size`)
    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    cudaMalloc(&d_B.elements,size);
    cudaMemcpy(d_B.elements, B.elements,size,cudaMemcpyHostToDevice);
    //allocate memory for matrix C on device - obviously nothing to copy
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    cudaMalloc(&d_C.elements,size);
    // 3 blocks across x, each 1x3 threads: together they cover the 3x3 output.
    dim3 dimBlock(1,3);
    //dim3 dimGrid(B.width/dimBlock.x,A.height/dimBlock.y);
    MatMulKernel<<<3, dimBlock>>>(d_A, d_B, d_C);
    //read matrix multiplication result from device
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    //Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Each thread computes one element of C by reading a full row of A and a
// full column of B from global memory (no shared-memory tiling).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    float Cvalue=0;
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e]* B.elements[e * B.width + col];
    C.elements[row * C.width + col] = Cvalue;
}

int main(void)
{
    float *a_els = new float[9];
    for(int i = 0; i<9; ++i)
        a_els[i]=2.0;
    float *b_els = new float[9];
    for(int i = 0; i<9; ++i)
        b_els[i]=2.0;
    Matrix m_a;
    m_a.height=3;
    m_a.width=3;
    m_a.elements = a_els;
    Matrix m_b;
    m_b.height=3;
    m_b.width=3;
    // FIX: the original assigned a_els here, so b_els was allocated but
    // never used. The numeric result was unaffected only because both
    // buffers happen to hold the same values.
    m_b.elements = b_els;
    float *c_els=new float[9];
    Matrix m_c;
    m_c.height = 3;
    m_c.width = 3;
    m_c.elements=c_els;
    MatMul(m_a,m_b,m_c);
    // Expected: every element is 2*2*3 = 12.
    for(int i =0; i<9;i++)
        printf("%f \n",m_c.elements[i]);
    delete[] a_els;
    delete[] b_els;
    delete[] c_els;
    return 0;
}
21,991
#include <stdio.h>

#define N 10000
#define THREADS 100

// SAXPY-style kernel: B[i] = A[i]*X + Y for i < n.
// FIX (robustness): the original had no bounds check, so any launch where
// the grid is padded past n (n not a multiple of the block size) wrote out
// of bounds. With N=10000 and THREADS=100 the grid is exact, but the guard
// makes the kernel safe for any size.
__global__ void saxpy(float *A, float *B, float X, float Y, int n){
    int i = blockDim.x*blockIdx.x+threadIdx.x;
    if (i < n) {
        B[i] = A[i]*X + Y;
    }
}

int main()
{
    float A[N], B[N], B2[N], X, Y;
    float *A_d, *B_d;
    int i;
    dim3 dimBlock(THREADS);
    // Ceiling division pads the grid when N isn't a multiple of THREADS.
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x);

    for (i=0; i<N; i++)
        A[i] = i*2;
    X = 1.23;
    Y = 2.34;
    for (i=0; i<N; i++)
        B2[i] = A[i]*X + Y;  // B2 is used for checking

    cudaMalloc((void**) &A_d, sizeof(float)*N);
    cudaMalloc((void**) &B_d, sizeof(float)*N);
    cudaMemcpy(A_d, A, sizeof(float)*N, cudaMemcpyHostToDevice);

    saxpy<<<dimGrid, dimBlock>>>(A_d, B_d, X, Y, N);

    // Blocking copy also synchronizes with the kernel launch.
    cudaMemcpy(B, B_d, sizeof(float)*N, cudaMemcpyDeviceToHost);

    // Report any element differing from the CPU reference.
    for (i=0; i<N; i++)
        if (fabs(B[i]-B2[i]) > 0.001)
            printf("%d: %f %f\n",i, B[i], B2[i]);

    cudaFree(A_d);
    cudaFree(B_d);
}
21,992
#include "includes.h"

// 2D multi-channel convolution: for each output pixel (one thread per pixel,
// 2D launch) and each of n_kernels filters, sum t * tk over the k_rows x
// k_columns window centered on the pixel, across all channels. Out-of-image
// taps are skipped (zero padding). Layouts: t is HWC, tk is (m, n, kernel),
// out is (row, col, kernel).
__global__ void conv(float *t, float *tk, float *out, int t_rows, int t_columns, int n_channels, int k_rows, int k_columns, int n_kernels)
{
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= t_rows || col >= t_columns)
        return;

    // Top-left corner of the window (may be negative near the border).
    const int rowBase = row - k_rows / 2;
    const int colBase = col - k_columns / 2;

    for (int k = 0; k < n_kernels; k++)
    {
        float acc = 0;
        for (int m = 0; m < k_rows; m++)
        {
            const int r = rowBase + m;
            if (r < 0 || r >= t_rows)
                continue;  // row tap falls outside the image
            for (int n = 0; n < k_columns; n++)
            {
                const int q = colBase + n;
                if (q < 0 || q >= t_columns)
                    continue;  // column tap falls outside the image
                for (int c = 0; c < n_channels; c++)
                    acc += t[(r * t_columns + q) * n_channels + c]
                         * tk[(m * k_columns + n) * n_kernels + k];
            }
        }
        out[(row * t_columns + col) * n_kernels + k] = acc;
    }
}
21,993
// wave 1D GPU
// compile: nvcc -arch=sm_70 -O3 wave_1D.cu
// run: ./a.out
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "cuda.h"
#define DAT double
#define GPU_ID 0 // typically 4 (0-3) on machines at stanford
#define BLOCK_X 100
#define GRID_X 1
#define OVERLENGTH 1 //needed for extra staggered grid
// zeros(A,N): declare A_h/A_d, zero-fill the host array, allocate the device
// array and copy the zeros over. Relies on a local loop variable `i`.
#define zeros(A,N) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc((N)*sizeof(DAT)); \
for(i=0; i < (N); i++){ A##_h[i]=(DAT)0.0; } \
cudaMalloc(&A##_d ,(N)*sizeof(DAT)); \
cudaMemcpy( A##_d,A##_h,(N)*sizeof(DAT),cudaMemcpyHostToDevice);
#define free_all(A) free(A##_h); cudaFree(A##_d);
#define gather(A,N) cudaMemcpy( A##_h,A##_d,(N)*sizeof(DAT),cudaMemcpyDeviceToHost);
// Write N values of A as raw binary to "<A_name>.dat".
void save_array(DAT* A, int N, const char A_name[]){
char* fname; FILE* fid;
asprintf(&fname, "%s.dat" , A_name);
fid=fopen(fname, "wb");
fwrite(A, sizeof(DAT), N, fid);
fclose(fid);
free(fname);
}
#define SaveArray(A,N,A_name) gather(A,N); save_array(A##_h, N, A_name);
// Report any sticky CUDA error and reset the device if one occurred.
void clean_cuda(){
cudaError_t ce = cudaGetLastError();
if(ce != cudaSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", cudaGetErrorString(ce)); cudaDeviceReset();}
}
// --------------------------------------------------------------------- //
// Physics
const DAT Lx = 10.0;   // domain length
const DAT k = 1.0;     // bulk modulus
const DAT rho = 1.0;   // density
// Numerics
const int nx = BLOCK_X*GRID_X-OVERLENGTH;
const int nt = 200;    // number of time steps
const DAT dx = Lx/((DAT)nx);
const DAT dt = dx/sqrt(k/rho)/2.1;  // CFL-limited time step
// Computing physics kernels
// Initial condition: cell-centered coordinates and a Gaussian pressure pulse.
__global__ void init(DAT* x, DAT* P, const DAT Lx, const DAT dx, const int nx){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
if (ix<nx){ x[ix] = (DAT)ix*dx + (-Lx+dx)/2.0; }
if (ix<nx){ P[ix] = exp(-(x[ix]*x[ix])); }
}
// Velocity update on the staggered grid: V is defined on nx+1 faces; interior
// faces (0 < ix < nx) use the pressure gradient across neighboring cells.
__global__ void compute_V(DAT* V, DAT* P, const DAT dt, const DAT rho, const DAT dx, const int nx){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
if (ix>0 && ix<nx){ V[ix] = V[ix] - dt*(P[ix]-P[ix-1])/dx/rho; }
}
// Pressure update from the velocity divergence; V[ix+1] is valid because V
// holds nx+1 entries.
__global__ void compute_P(DAT* V, DAT* P, const DAT dt, const DAT k, const DAT dx, const int nx){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
if (ix<nx){ P[ix] = P[ix] - dt*(V[ix+1]-V[ix])/dx*k; }
}
// Host driver: select the GPU, allocate/zero fields, set initial conditions,
// run the leapfrog V/P updates for nt steps, and dump P to "P_c.dat".
int main(){
int i, it;
// Set up GPU
int gpu_id=-1;
dim3 grid, block;
block.x = BLOCK_X; grid.x = GRID_X;
gpu_id = GPU_ID;
cudaSetDevice(gpu_id); cudaGetDevice(&gpu_id);
cudaDeviceReset(); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); // set L1 to prefered
printf("Process uses GPU with id %d .\n",gpu_id);
// Initial arrays
zeros(x,nx );
zeros(P,nx );
zeros(V,nx+1);
// Initial conditions
init<<<grid,block>>>(x_d, P_d, Lx, dx, nx);
cudaDeviceSynchronize();
// Action
for (it=0;it<nt;it++){
compute_V<<<grid,block>>>(V_d, P_d, dt, rho, dx, nx);
cudaDeviceSynchronize();
compute_P<<<grid,block>>>(V_d, P_d, dt, k , dx, nx);
cudaDeviceSynchronize();
}//it
SaveArray(P,nx,"P_c");
free_all(x);
free_all(P);
free_all(V);
clean_cuda();
}
21,994
#include "includes.h"

// Byte-to-float normalization: dst[i] = (src[i] - alpha) / beta + bias.
// One thread per element; `size` guards the grid tail.
__global__ void NormalizationExecutionKernel(unsigned char* src, float* dst, const int size, const float alpha, const float beta, const float bias)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= size)
        return;
    dst[gid] = (float)(src[gid] - alpha) / beta + bias;
}
21,995
//pass
//--gridDim=128 --blockDim=128
#include <cuda.h>

// Uniform-add step of a block-scan: adds each block's uniform value
// (broadcast through shared memory) to two elements per thread.
// This file is a mutation-testing artifact: the BUGINJECT marker and the
// MUTATION macro below select an intentionally wrong variant for the
// verifier; leave both paths exactly as written.
__global__ void uniformAdd(float *g_data, float *uniforms, int n, int blockOffset, int baseIndex)
{
__shared__ float uni[1];
// One thread loads the block's uniform; __syncthreads below publishes it.
if (threadIdx.x == 0) uni[0] = uniforms[blockIdx.x + blockOffset];
/* BUGINJECT: MUTATE_OFFSET, UP, ZERO */
unsigned int address = blockIdx.x * (blockDim.x << 1) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
#ifdef MUTATION
// couldn't apply mutation above; apply here instead
g_data[0] += uni[0];
#else
g_data[address] += uni[0];
#endif
// Second add is masked by the (threadIdx.x + blockDim.x < n) predicate:
// multiplying by 0/1 turns an out-of-range add into adding zero.
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni[0];
}
21,996
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <curand_kernel.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#define N 10
using namespace std;

// Singly-linked list node living in device-heap memory.
struct node{
int base;
int data;
node *next;
};

// One list head per logical thread id, in device global memory.
__device__ node *head[N];

// Walk and print each thread's list; one thread per list.
__global__ void print_kernel()
{
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i >= N){
return;
}
node *temp = head[i];
while (temp){
printf("%d and %d from %d\n", temp->base, temp->data, i);
temp = temp->next;
}
}

// Build a 5-node list per thread using device-side `new` (appends j = i..i+4).
// NOTE(review): the nodes are never delete'd -- the device heap is only
// reclaimed by the device reset at the end of main.
__global__ void setup_kernel()
{
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i >= N){
return;
}
head[i]=NULL;
node *end = head[i];
for (int j=i; j<i+5; j++){
node *temp = new node();
temp->base = j;
temp->data = j;
temp->next = NULL;
if (end){
end->next = temp;
end = end->next;
}
else{
head[i]=temp;
end = head[i];
}
}
}

// Launch setup then print (same stream, so they run in order).
// NOTE(review): there is no cudaDeviceSynchronize() before cudaDeviceReset();
// confirm the reset does not tear down the context before print_kernel's
// output is flushed.
int main(int argc, char const *argv[])
{
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock -1)/threadsPerBlock;
setup_kernel<<<blocksPerGrid, threadsPerBlock>>>();
print_kernel<<<blocksPerGrid, threadsPerBlock>>>();
cudaDeviceReset();
return 0;
}
21,997
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>

#define ARRAY_SIZE 5000000
#define TPB 256

/* Fills an ARRAY_SIZE-long buffer with pseudo-random floats in [0, 100].
 * A NULL pointer is silently ignored. */
void fill_data(float *var)
{
    int i;
    if (var == NULL)
        return;
    for (i = 0; i < ARRAY_SIZE; i++) {
        var[i] = 100 * (float)((float)rand() / RAND_MAX);
    }
}

/* Serial host SAXPY: y = A*x + y over ARRAY_SIZE elements.
 * Prints the elapsed wall-clock time in microseconds. */
void saxpy_cpu(float *x, float *y, float A)
{
    struct timeval start_time;
    struct timeval stop_time;
    int cnt = 0;

    gettimeofday(&start_time, NULL);
    for (cnt = 0; cnt < ARRAY_SIZE; cnt++) {
        y[cnt] = (A * x[cnt]) + y[cnt];
    }
    gettimeofday(&stop_time, NULL);
    printf("Total time of Execution in CPU: %ld usec\n\n",
           (stop_time.tv_sec * 1000000 + stop_time.tv_usec) -
           (start_time.tv_sec * 1000000 + start_time.tv_usec));
}

/* One thread per element: y[i] = a*x[i] + y[i], guarded against the tail.
 * No barrier is needed: each thread touches a distinct element.
 * (The original's trailing __syncthreads() was a no-op and was removed.) */
__global__ void kernel(float *x, float *y, float a, int size)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < size)
        y[i] = (a * x[i]) + y[i];
}

/* Launches the SAXPY kernel on device buffers and prints the elapsed time
 * (launch + execution, bracketed by a device synchronize). */
void saxpy_gpu(float *x, float *y, float A)
{
    struct timeval start_time;
    struct timeval stop_time;

    gettimeofday(&start_time, NULL);
    /* Ceil-division: the original (ARRAY_SIZE/TPB)+1 launched one extra,
     * fully-idle block whenever ARRAY_SIZE was a multiple of TPB. */
    kernel<<<(ARRAY_SIZE + TPB - 1) / TPB, TPB>>>(x, y, A, ARRAY_SIZE);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    gettimeofday(&stop_time, NULL);
    printf("Total time of Execution in GPU: %ld usec\n\n",
           (stop_time.tv_sec * 1000000 + stop_time.tv_usec) -
           (start_time.tv_sec * 1000000 + start_time.tv_usec));
}

int main()
{
    float *X = NULL;
    float *Y = NULL;
    float *Y_GPU = NULL;
    float A = 2.3;
    float *gpuX = NULL;
    float *gpuY = NULL;

    X = (float*)malloc(ARRAY_SIZE * sizeof(float));
    Y = (float*)malloc(ARRAY_SIZE * sizeof(float));
    Y_GPU = (float*)malloc(ARRAY_SIZE * sizeof(float));
    if (X == NULL || Y == NULL || Y_GPU == NULL) {
        fprintf(stderr, "host allocation failed\n");
        free(X); free(Y); free(Y_GPU);
        return 1;
    }

    fill_data(X);
    fill_data(Y);

    cudaMalloc(&gpuX, ARRAY_SIZE * sizeof(float));
    cudaMalloc(&gpuY, ARRAY_SIZE * sizeof(float));
    cudaMemcpy(gpuX, X, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(gpuY, Y, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);

    saxpy_cpu(X, Y, A);
    printf("Computing SAXPY on the CPU... Done!\n\n");

    saxpy_gpu(gpuX, gpuY, A);
    printf("Computing SAXPY on the GPU... Done!\n\n");

    cudaMemcpy(Y_GPU, gpuY, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost);

    /* Compare CPU and GPU results with a small absolute tolerance. */
    int i = 0;
    for (i = 0; i < ARRAY_SIZE; i++) {
        if (((Y[i] - Y_GPU[i]) < -0.05) || ((Y[i] - Y_GPU[i]) > 0.05)) {
            printf("Comparing the output of each implementation.. Mismatch at index %d\n", i);
            break;
        }
    }
    if (i == ARRAY_SIZE)
        printf("Comparing the output of each implementation.. Correct\n");

    cudaFree(gpuX);
    cudaFree(gpuY);
    free(X);
    free(Y);
    free(Y_GPU);  /* was leaked in the original */
    return 0;
}
21,998
#include "includes.h"

// Fills an interleaved complex array with a constant value: element idx gets
// real part br at c[2*idx] and imaginary part bi at c[2*idx+1].
// N is the number of complex elements (c holds 2*N floats).
__global__ void set_carr(float br, float bi, float * c, int N)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N) {
        const int base = 2 * tid;  // start of this element's (re, im) pair
        c[base]     = br;
        c[base + 1] = bi;
    }
}
21,999
#include <stdio.h>
#include <cuda_runtime.h>

#define RANGESTART 40000000
#define RANGEEND 50000000

/* Exact primality test for device code.
 * BUG FIX: the original started trial division at 3 and never handled even
 * numbers, so every even p >= 4 whose odd factors exceed sqrt(p) (4, 6,
 * 2*prime, ...) was reported prime, letting the kernel "find" bogus
 * decompositions such as n = 2 + (n-2) for composite even n-2. It also used
 * sqrtf(), whose rounding can skip the largest candidate factor. The even
 * case is handled explicitly and the loop bound is exact integer math. */
__device__ int is_prime(const int p)
{
    if (p < 2) {
        return 0;
    }
    if (p % 2 == 0) {
        return p == 2;  /* 2 is the only even prime */
    }
    for (int i = 3; (long long)i * i <= p; i += 2) {
        if (p % i == 0) {
            return 0;
        }
    }
    return 1;
}

/* One thread per number n = id + RANGESTART. Odd n are skipped (their
 * result slot stays 0 from the host-side cudaMemset). For even n the thread
 * scans pairs (i, j) with i + j = n, i ascending from 2, and writes
 * result[id] = 0 on the first prime pair, or 1 if none exists. */
__global__ void goldbach(int* result)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if ((id + RANGESTART) % 2 == 1) {
        return;
    }
    int i = 2;
    for (int j = id + RANGESTART - i; j > 2; j--, i++) {
        if (is_prime(i) == 1 && is_prime(j) == 1) {
            if (id < 20) {
                printf("[Thread %d] The first sum is %d + %d = %d \n", id, i, j, id + RANGESTART);
            }
            result[id] = 0;
            return;
        }
    }
    result[id] = 1;
}

int main(void)
{
    /* Error code to check return values for CUDA calls */
    cudaError_t err = cudaSuccess;

    int numElements = RANGEEND - RANGESTART;
    size_t size = numElements * sizeof(int);
    /* (the original also computed an unused RANGEEND-sized buffer length;
     * removed) */

    int *h_result = (int *)malloc(size);
    /* Verify that allocations succeeded */
    if (h_result == NULL) {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    /* Allocate and zero the device result: 0 = decomposition found/skipped */
    int *d_result = NULL;
    cudaMalloc((void **)&d_result, size);
    cudaMemset(d_result, 0, size);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate data (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* Launch the kernel: one thread per candidate number */
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    goldbach<<<blocksPerGrid, threadsPerBlock>>>(d_result);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* Copy results back (cudaMemcpy also synchronizes with the kernel). */
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_result, d_result, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* A 1 anywhere means some even number had no prime decomposition. */
    for (int i = 0; i < numElements; ++i) {
        if (h_result[i] == 1) {
            fprintf(stderr, "Test failed for number %d!\n", RANGESTART + i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test PASSED: Goldbach was right\n");

    /* Free device global memory */
    cudaFree(d_result);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free device memory (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* Free host memory */
    free(h_result);

    /* Reset the device and exit */
    err = cudaDeviceReset();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return 0;
}
22,000
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#define MIN -1024
#define MAX 1024
#define FALSE 0
#define TRUE 1

#include <unistd.h>
#include <stdint.h>
#include <stdlib.h>

// #define BENCH_PRINT

/*----------- using cycle counter ------------*/
__inline__ uint64_t rdtsc()
{
    uint32_t lo, hi;
    /* We cannot use "=A", since this would use %rax on x86_64 */
    __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
    return (uint64_t)hi << 32 | lo;
}

unsigned long long start_cycles;
#define startCycle() (start_cycles = rdtsc())
#define stopCycle(cycles) (cycles = rdtsc()-start_cycles)

/*--------- using gettimeofday ------------*/
#include <sys/time.h>
struct timeval starttime;
struct timeval endtime;
#define startTime() \
{ \
    gettimeofday(&starttime, 0); \
}
/* Stores elapsed microseconds into valusecs (the callers use doubles). */
#define stopTime(valusecs) \
{ \
    gettimeofday(&endtime, 0); \
    valusecs = (endtime.tv_sec - starttime.tv_sec) * 1000000 + endtime.tv_usec - starttime.tv_usec; \
}

/* Additive (digital-root) persistence of x: how many times the digits must
 * be summed until a single digit remains. Host reference implementation. */
int SeqPersistence(int x)
{
    int sum, y, pers = 0;
    while (x >= 10) {
        sum = 0;
        y = x;
        while (y > 0) {
            sum += y % 10;
            y /= 10;
        }
        x = sum;
        pers++;
    }
    return pers;
}

/* Serial reference: out[i] = persistence(in[i]) for i in [0, n). */
void SeqArrPersistance(int *in, int *out, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        out[i] = SeqPersistence(in[i]);
    }
}

/* Device copy of the persistence computation (same algorithm as host). */
__device__ int Persistance(int x)
{
    int sum, y, pers = 0;
    while (x >= 10) {
        sum = 0;
        y = x;
        while (y > 0) {
            sum += y % 10;
            y /= 10;
        }
        x = sum;
        pers++;
    }
    return pers;
}

/* One thread per element of the flat n*m array. The flat index is built
 * from the *grid* width (gridDim.x*blockDim.x), which may exceed n; this is
 * still correct because every thread gets a unique flat index, the indices
 * cover [0, gridW*gridH) exactly once, and the n*m guard discards the
 * padding — the array is treated as flat storage, not as rows of width n. */
__global__ void MatrixPersistance(int* dev_in, int* dev_out, int n, int m)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int index = idy * gridDim.x * blockDim.x + idx;
    if (index < n * m) {
        dev_out[index] = Persistance(dev_in[index]);
    }
}

int main(int argc, char* argv[])
{
    int *in, *dev_in, *dev_out, *out, *cuda_out, i, n, m;
    int test;
    cudaError_t cudaStatus;
    srand(time(NULL));

    /* Matrix dimensions from argv or interactively. */
    if (argc == 3) {
        n = atoi(argv[1]);
        m = atoi(argv[2]);
    } else {
        printf("N?");
        scanf("%d", &n);
        printf("M?");
        scanf("%d", &m);
    }

    in = (int*)malloc(m * n * sizeof(int));
    out = (int*)malloc(m * n * sizeof(int));
    cuda_out = (int*)malloc(m * n * sizeof(int));

    /* Random non-negative inputs in [0, MAX]. */
    for (i = 0; i < m * n; i++) {
        in[i] = abs(rand() / (double)RAND_MAX * (MAX - MIN) + MIN);
    }

    cudaStatus = cudaMalloc((void**)&dev_in, m * n * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        exit(-1);
    }
    cudaStatus = cudaMalloc((void**)&dev_out, m * n * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        exit(-1);
    }
    cudaStatus = cudaMemcpy(dev_in, in, m * n * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        exit(-1);
    }

    /* 32x32 thread blocks tiling an n x m logical matrix. */
    int numBlocksN = ceil((float)(n) / 32.0);
    int numBlocksM = ceil((float)(m) / 32.0);
    int numThreadsPerBlock = 32;
    dim3 dimGrid(numBlocksN, numBlocksM);
    dim3 dimBlock(numThreadsPerBlock, numThreadsPerBlock);

    double elapsed_time_host, elapsed_time_device;

    startTime();
    MatrixPersistance <<<dimGrid, dimBlock>>>(dev_in, dev_out, n, m);
    cudaStatus = cudaDeviceSynchronize();
    stopTime(elapsed_time_device);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        exit(-1);
    }
    cudaMemcpy(cuda_out, dev_out, n * m * sizeof(int), cudaMemcpyDeviceToHost);

    startTime();
    SeqArrPersistance(in, out, n * m);
    stopTime(elapsed_time_host);

    /* Element-wise comparison against the host reference. */
    test = TRUE;
    for (i = 0; i < n * m; i++) {
        if (out[i] != cuda_out[i]) {
            printf("%d\t%d\t%d\n", i, out[i], cuda_out[i]);
            test = FALSE;
            //break;
        }
    }
    if (test) {
        printf("TEST PASSED\n");
    } else {
        printf("TEST FAILED\n");
    }
    /* BUG FIX: the timings are doubles, but the original printed them with
     * %ld — undefined behavior that produced garbage numbers. */
    printf("Seq: %.0f\tPar: %.0f\n", elapsed_time_host, elapsed_time_device);

    cudaFree(dev_in);
    cudaFree(dev_out);
    free(in);
    free(out);
    free(cuda_out);
    system("PAUSE");
    return 0;
}