serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
21,201
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <limits.h>
#include <time.h>

#define NV 5 // number of vertices

// Abort-on-error wrapper: every CUDA runtime call returns a status that the
// original version silently ignored.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Fill arr (N = dim*dim entries) with a random symmetric weighted graph:
// integer-valued weights in [0, 100), zero diagonal (no self loops).
void createGraph(float *arr, int N) {
    time_t t;                          // used for randomizing values
    int maxWeight = 100;               // exclusive upper bound on edge weight
    int dim = (int) sqrt((double) N);  // hoisted: sqrt() was recomputed per index

    srand((unsigned) time(&t));

    for (int col = 0; col < dim; col++) {
        for (int row = 0; row < dim; row++) {
            if (col != row) {
                arr[row * dim + col] = (float) (rand() % maxWeight);
                // mirror so the graph is symmetric (undirected)
                arr[col * dim + row] = arr[row * dim + col];
            } else {
                arr[row * dim + col] = 0.0f; // NO LOOPS
            }
        }
    }
}

// Print an n x n distance matrix, one row per line.
void printGraph(float *arr, int n) {
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            printf("%f ", arr[i * n + j]);
        }
        printf("\n");
    }
}

// One Floyd-Warshall relaxation pass for a fixed pivot k.
// Launch layout: blockIdx.y selects row i, (blockIdx.x, threadIdx.x) selects
// column j. Row k and column k are fixed points of pass k, so blocks reading
// them concurrently with other blocks' writes is safe.
// FIXES vs. the original:
//  * distances were staged through *int* variables (shared and local),
//    silently truncating the float weights -> float throughout;
//  * out-of-range threads returned before __syncthreads(), a divergent
//    barrier (undefined behavior) -> guard the work instead of returning.
__global__ void gpuFloyd(int n, float *arr, int k) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x; // column j
    int row = blockIdx.y;                            // row i

    __shared__ float dist_ik; // d[i][k], shared by the whole block row
    if (threadIdx.x == 0) {
        dist_ik = arr[n * row + k];
    }
    __syncthreads();

    if (gid < n) {
        float dist_kj = arr[k * n + gid];    // d[k][j]
        float total = dist_ik + dist_kj;
        int idx = n * row + gid;
        if (arr[idx] > total) {
            arr[idx] = total;                // relax d[i][j] through pivot k
        }
    }
}

// Reference CPU Floyd-Warshall (all-pairs shortest paths), in place.
void cpuFloyd(int n, float *cpuGraph) {
    for (int k = 0; k < n; k++) {
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                if (cpuGraph[i * n + j] > (cpuGraph[i * n + k] + cpuGraph[k * n + j])) {
                    cpuGraph[i * n + j] = cpuGraph[i * n + k] + cpuGraph[k * n + j];
                }
            }
        }
    }
}

// Element-wise comparison of the CPU and GPU results; prints any mismatch.
// (Exact equality is OK here: both sides do the same float adds/compares.)
void valid(int n, float *cpuGraph, float *gpuGraph) {
    printf("VALIDATING that cpuGraph array from CPU and gpuGraph array from GPU match... \n");
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            if (cpuGraph[i * n + j] != gpuGraph[i * n + j]) {
                printf("ERROR MISMATCH in array cpuGraph i %d j %d CPU SAYS %f and GPU SAYS %f \n",
                       i, j, cpuGraph[i * n + j], gpuGraph[i * n + j]);
            }
        }
    }
    printf("OK \n\n");
}

// Build a random graph, run Floyd-Warshall on GPU and CPU, time both, and
// cross-validate the two results.
int main(int argc, char **argv) {
    clock_t t;
    float *hostArr, *gpuGraph;
    float *devArr;
    float *graph, *cpuGraph;
    int i, j;
    int n = NV;

    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));

    printf("\n");
    printf("RUNNING WITH %d VERTICES \n", n);
    printf("\n");

    CUDA_CHECK(cudaMalloc(&devArr, n * n * sizeof(float)));

    // CPU arrays
    graph = (float *) malloc(n * n * sizeof(float));
    cpuGraph = (float *) malloc(n * n * sizeof(float));
    // GPU staging arrays
    hostArr = (float *) malloc(n * n * sizeof(float));
    gpuGraph = (float *) malloc(n * n * sizeof(float));

    // Randomize distances between each node
    createGraph(graph, n * n);

    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            cpuGraph[i * n + j] = graph[i * n + j];
            hostArr[i * n + j] = graph[i * n + j];
        }
    }

    // First mem copy: initial distances to the device
    CUDA_CHECK(cudaMemcpy(devArr, hostArr, n * n * sizeof(float), cudaMemcpyHostToDevice));

    int gputhreads = 512;
    // x dimension covers columns (ceil-div), y dimension selects the row
    dim3 dimGrid((n + gputhreads - 1) / gputhreads, n);

    CUDA_CHECK(cudaEventRecord(start));
    // The k loop must stay on the host: pass k depends on pass k-1.
    for (int k = 0; k < n; k++) {
        gpuFloyd<<<dimGrid, gputhreads>>>(n, devArr, k);
    }
    CUDA_CHECK(cudaGetLastError()); // catch launch-configuration errors
    CUDA_CHECK(cudaEventRecord(stop));

    // Second mem copy: results back to the host (synchronizes with kernels)
    CUDA_CHECK(cudaMemcpy(gpuGraph, devArr, n * n * sizeof(float), cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaEventSynchronize(stop));

    float milliseconds = 0;
    CUDA_CHECK(cudaEventElapsedTime(&milliseconds, start, stop));
    printf("GPU Calculation Time elapsed: %.20f milliseconds\n", milliseconds);

    // CPU calculation, timed with clock()
    t = clock();
    cpuFloyd(n, cpuGraph);
    t = clock() - t;
    printf("CPU Calculation Time elapsed: %.20f milliseconds\n\n",
           (((float) t) / CLOCKS_PER_SEC) * 1000);

    // Check that the CPU and GPU results agree
    valid(n, cpuGraph, gpuGraph);

    CUDA_CHECK(cudaFree(devArr));
    free(graph);
    free(cpuGraph);
    free(hostArr);
    free(gpuGraph);
    printf("FINISHED!!! \n");
    return 0;
}
21,202
// main.cpp — Parallel Degree of Separation
// Created by Cary on 11/16/14.  Copyright (c) 2014 Cary. All rights reserved.
//
// Loads an undirected friendship edge list, remaps raw user IDs to dense
// indices on the GPU, expands every user's friend set level by level (BFS to
// a chosen depth, chunked to fit device memory), then answers interactive
// "degree of separation" queries on the device.
#include <iostream>
#include <fstream>
#include <cstdlib>
#include <map>
#include <vector>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/device_vector.h>

// Upper bound on device ints handled per chunk; chunking keeps the
// (chunk x num_node) working set within device memory.
#define limit 8000*8000

// Rewrite every occurrence of a user's raw ID in the edge arrays with the
// user's dense index. One thread per user in the current chunk.
__global__ void relation(int *users, int *input_user1, int *input_user2,
                         int *num_edge, int *num_node, int *base) {
    int thid = threadIdx.x;
    int start_pos = blockIdx.x * blockDim.x;
    int idx = thid + start_pos + *base;
    if (idx < *num_node) {
        // FIX: users[idx] was read before the bounds check (out-of-bounds
        // read for threads past the last user).
        int user = users[idx];
        for (int i = 0; i < *num_edge; i++) {
            if (input_user1[i] == user) input_user1[i] = idx;
            if (input_user2[i] == user) input_user2[i] = idx;
        }
    }
}

// Level-1 expansion: for each user in the chunk, gather direct friends into
// friend_list (per-user membership table), level_content (BFS frontier) and
// the CSR-style first_level array at offset[user]; record the degree in
// num_friend and the initial frontier bound.
__global__ void search_first_level(int *users, int *input_user1, int *input_user2,
                                   int *level_content, int *parent_content,
                                   int *friend_list, int *num_node, int *num_edge,
                                   int *num_friend, int *bound, int *offset,
                                   int *first_level, int *base) {
    int thid = threadIdx.x;
    int start_pos = blockIdx.x * blockDim.x;
    int j = 0; // friends found so far for this user
    if (thid + start_pos + *base < *num_node) {
        for (int i = 0; i < *num_edge; i++) {
            if (input_user1[i] == thid + start_pos + *base) {
                friend_list[(thid + start_pos) * (*num_node) + input_user2[i]] = input_user2[i];
                level_content[(thid + start_pos) * (*num_node) + j] = input_user2[i];
                first_level[offset[thid + start_pos + *base] + j] = input_user2[i];
                j++;
            }
            if (input_user2[i] == thid + start_pos + *base) {
                friend_list[(thid + start_pos) * (*num_node) + input_user1[i]] = input_user1[i];
                level_content[(thid + start_pos) * (*num_node) + j] = input_user1[i];
                first_level[offset[thid + start_pos + *base] + j] = input_user1[i];
                j++;
            }
        }
        num_friend[(thid + start_pos + *base)] = j;
        bound[(thid + start_pos + *base)] = j;
    }
}

// Expand one more BFS level: for every frontier entry between the previous
// bounds, append unseen friends-of-friends and record each entry's parent
// index for path reconstruction. in_bound_1/in_bound_2 delimit each user's
// current frontier window inside level_content.
__global__ void search_other_level(int *input_user1, int *input_user2, int *level_content,
                                   int *parent_content, int *friend_list, int *num_node,
                                   int *num_friend, int *in_bound_1, int *in_bound_2,
                                   int *offset, int *first_level, int *base, int *count) {
    int thid = threadIdx.x;
    int start_pos = blockIdx.x * blockDim.x + *base;
    if (thid + start_pos - *base < *count) {
        int l_bound = in_bound_1[thid + start_pos];
        int u_bound = in_bound_2[thid + start_pos];
        in_bound_1[thid + start_pos] = u_bound; // next pass starts where this one ends
        for (int k = l_bound; k < u_bound; k++) {
            int friend_t = level_content[(thid + start_pos - *base) * (*num_node) + k];
            if (friend_t == -1) {
                break; // frontier exhausted
            } else {
                for (int n = 0; n < num_friend[friend_t]; n++) {
                    int temp = first_level[offset[friend_t] + n];
                    // Out-of-bound diagnostic retained from the original.
                    if ((thid + start_pos - *base) * (*num_node) + temp > (*count) * (*num_node))
                        printf("AAAAAAA %d %d %d %d %d\n", temp,
                               (thid + start_pos - *base) * (*num_node) + temp,
                               (*count) * (*num_node), thid + start_pos - *base, (*count));
                    if (friend_list[(thid + start_pos - *base) * (*num_node) + temp] == -1 &&
                        temp != thid + start_pos) {
                        friend_list[(thid + start_pos - *base) * (*num_node) + temp] = temp;
                        level_content[(thid + start_pos - *base) * (*num_node) +
                                      in_bound_2[thid + start_pos]] = temp;
                        parent_content[(thid + start_pos - *base) * (*num_node) +
                                       in_bound_2[thid + start_pos]] = k;
                        in_bound_2[thid + start_pos]++;
                    }
                }
            }
        }
    }
}

// Locate user2 in this user's BFS level array and walk parent_content
// backwards to reconstruct the path into output; sets *found on success.
__global__ void find(int *user2, int *friend_list, int *level_content,
                     int *parent_content, int *num_node, int *output,
                     int *outsize, int *found, int *base) {
    int thid = threadIdx.x;
    int start_pos = blockIdx.x * blockDim.x + *base;
    int parent_index = 0;
    if (thid + start_pos < *num_node) {
        if (level_content[(thid + start_pos)] == *user2 && friend_list[*user2] != -1) {
            *found = 1;
            parent_index = parent_content[(thid + start_pos)];
            output[*outsize] = level_content[(thid + start_pos)];
            (*outsize)++;
            while (parent_index != -1) {
                output[*outsize] = level_content[parent_index];
                parent_index = parent_content[parent_index];
                (*outsize)++;
            }
        }
    }
}

// Raw user ID -> list of friends' raw IDs, built while reading the input.
std::map<int, std::vector<int> > unique_user;

// Record the undirected edge (user_id1, user_id2) in both adjacency lists.
void addEdge(int user_id1, int user_id2) {
    std::map<int, std::vector<int> >::iterator it;
    // Insert edge into user_id1's friend list
    it = unique_user.find(user_id1);
    if (it == unique_user.end()) {
        std::vector<int> friend_list(1, user_id2);
        unique_user.insert(std::pair<int, std::vector<int> >(user_id1, friend_list));
    } else {
        it->second.push_back(user_id2);
    }
    // Insert edge into user_id2's friend list
    it = unique_user.find(user_id2);
    if (it == unique_user.end()) {
        std::vector<int> friend_list(1, user_id1);
        unique_user.insert(std::pair<int, std::vector<int> >(user_id2, friend_list));
    } else {
        it->second.push_back(user_id1);
    }
}

int main(int argc, const char * argv[]) {
    if (argc < 2) {
        std::cout << "Please enter the path of input data file\n";
        return 0;
    }
    struct timeval starttime, endtime;
    thrust::device_vector<int> user1;
    thrust::device_vector<int> user2;
    gettimeofday(&starttime, NULL);

    // ---- read the edge list -------------------------------------------------
    std::ifstream data_file;
    data_file.open(argv[1]);
    if (data_file.is_open()) {
        int id1, id2;
        // FIX: test the extraction itself; the old eof() loop processed the
        // last record twice when the file ended with trailing whitespace.
        while (data_file >> id1 >> id2) {
            user1.push_back(id1);
            user2.push_back(id2);
            addEdge(id1, id2);
        }
    } else {
        std::cout << "File did not open\n";
        exit(0);
    }
    data_file.close();

    int num_node = unique_user.size();
    int num_edge = user1.size();
    std::cout << "#Node := " << num_node << "\n";
    std::cout << "#Edge := " << num_edge << "\n";

    // offset[] holds each user's degree, then (after the scan) the CSR offset
    // of that user's slice of first_level; users[] maps dense index -> raw ID.
    int *offset = new int[num_node];
    int *users = new int[num_node];
    std::map<int, std::vector<int> >::iterator it;
    int ttt = 0;
    for (it = unique_user.begin(); it != unique_user.end(); ++it) {
        users[ttt] = it->first;
        offset[ttt] = it->second.size();
        ttt++;
    }
    if (ttt != num_node)
        std::cout << "ERROR!!!!! \n";
    int add = offset[num_node - 1]; // last degree, needed for the total below
    thrust::exclusive_scan(offset, offset + num_node, offset); // in-place scan

    gettimeofday(&endtime, NULL);
    long long time = ((endtime.tv_sec * 1000000 + endtime.tv_usec) -
                      (starttime.tv_sec * 1000000 + starttime.tv_usec));
    printf(">>>Initializaton takes := %lld microseconds \n\n", time);

    int search_depth;
    std::cout << "#Level you want to search: ";
    std::cin >> search_depth;

    int num_threads = limit / num_node;
    int block_size = num_threads > 512 ? 512 : num_threads;
    // FIX: ceil(num_threads/block_size) performed integer division before the
    // ceil; use integer ceil-division instead.
    int num_blocks = (num_threads + block_size - 1) / block_size;
    std::cout << "#threads: " << num_threads << " #Blocks: " << num_blocks << "\n";
    dim3 dimGrid(num_blocks);
    dim3 dimBlock(block_size);

    float para_time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    long long num_num = (long long) num_node * num_node;
    int *host_level_content = new int[num_num];
    int *host_parent_content = new int[num_num];
    int *host_friend_list = new int[num_num];

    int *device_user1 = thrust::raw_pointer_cast(&user1[0]);
    int *device_user2 = thrust::raw_pointer_cast(&user2[0]);
    int *device_offset;
    int *device_users;
    int *device_level_content;
    int *device_parent_content;
    int *device_num_friend;
    int *friend_list;
    int *device_num_node;
    int *device_num_edge;
    int *inbound_1;
    int *inbound_2;
    int *device_first_level;
    int *device_base;
    int *device_count;
    cudaError_t error;

    cudaMalloc((void **) &device_num_friend, num_node * sizeof(int));
    cudaMalloc((void **) &device_offset, num_node * sizeof(int));
    cudaMalloc((void **) &device_users, num_node * sizeof(int));
    // offset[num_node-1] + add == total directed edge slots (2 * num_edge)
    cudaMalloc((void **) &device_first_level, (offset[num_node - 1] + add) * sizeof(int));
    cudaMalloc((void **) &device_num_node, sizeof(int));
    cudaMalloc((void **) &device_num_edge, sizeof(int));
    cudaMalloc((void **) &inbound_1, num_node * sizeof(int));
    cudaMalloc((void **) &inbound_2, num_node * sizeof(int));
    cudaMalloc((void **) &device_base, sizeof(int));
    cudaMalloc((void **) &device_count, sizeof(int));
    cudaMemset(inbound_1, 0, num_node * sizeof(int));
    cudaMemcpy(device_num_node, &num_node, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_num_edge, &num_edge, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_offset, offset, num_node * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_users, users, num_node * sizeof(int), cudaMemcpyHostToDevice);
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("CUDA first error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }

    // ---- pass 1: rewrite raw IDs to dense indices, in chunks ----------------
    long long count = 0;
    long long base = 0;
    count = limit / num_node;
    if (count > num_node) count = num_node;
    do {
        // FIX: device_base is an int on the device; copy a real int instead
        // of the low 4 bytes of a long long (endianness-dependent).
        int base32 = (int) base;
        cudaMemcpy(device_base, &base32, sizeof(int), cudaMemcpyHostToDevice);
        relation<<<dimGrid, dimBlock>>>(device_users, device_user1, device_user2,
                                        device_num_edge, device_num_node, device_base);
        base = base + count;
        if (base + count > num_node) count = num_node - base;
    } while (base < num_node);

    // ---- pass 2: BFS expansion to search_depth levels, in chunks ------------
    base = 0;
    count = limit / num_node;
    if (count > num_node) count = num_node;
    do {
        cudaMalloc((void **) &device_level_content, count * num_node * sizeof(int));
        cudaMalloc((void **) &device_parent_content, count * num_node * sizeof(int));
        cudaMalloc((void **) &friend_list, count * num_node * sizeof(int));
        error = cudaGetLastError();
        if (error != cudaSuccess) {
            printf("CUDA second error: %s\n", cudaGetErrorString(error));
            exit(-1);
        }
        // -1 == "empty slot" (0xFF in every byte yields int -1)
        cudaMemset(device_level_content, -1, count * num_node * sizeof(int));
        cudaMemset(device_parent_content, -1, count * num_node * sizeof(int));
        cudaMemset(friend_list, -1, count * num_node * sizeof(int));
        int base32 = (int) base;
        int count32 = (int) count; // FIX: same long long -> int copy issue
        cudaMemcpy(device_base, &base32, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(device_count, &count32, sizeof(int), cudaMemcpyHostToDevice);
        error = cudaGetLastError();
        if (error != cudaSuccess) {
            printf("CUDA third error: %s\n", cudaGetErrorString(error));
            exit(-1);
        }
        search_first_level<<<dimGrid, dimBlock>>>(device_users, device_user1, device_user2,
                                                  device_level_content, device_parent_content,
                                                  friend_list, device_num_node, device_num_edge,
                                                  device_num_friend, inbound_2, device_offset,
                                                  device_first_level, device_base);
        for (int i = 1; i < search_depth; i++) {
            search_other_level<<<dimGrid, dimBlock>>>(device_user1, device_user2,
                                                      device_level_content, device_parent_content,
                                                      friend_list, device_num_node,
                                                      device_num_friend, inbound_1, inbound_2,
                                                      device_offset, device_first_level,
                                                      device_base, device_count);
        }
        error = cudaGetLastError();
        if (error != cudaSuccess) {
            printf("CUDA search_level error: %s\n", cudaGetErrorString(error));
            exit(-1);
        }
        cudaMemcpy((host_level_content + base * num_node), device_level_content,
                   count * num_node * sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy((host_parent_content + base * num_node), device_parent_content,
                   count * num_node * sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy((host_friend_list + base * num_node), friend_list,
                   count * num_node * sizeof(int), cudaMemcpyDeviceToHost);
        cudaFree(device_level_content);
        cudaFree(device_parent_content);
        cudaFree(friend_list);
        base = base + count;
        if (base + count > num_node) count = num_node - base;
    } while (base < num_node);

    cudaFree(device_num_edge);
    cudaFree(device_num_friend);
    cudaFree(inbound_1);
    cudaFree(inbound_2);
    cudaFree(device_base);
    cudaFree(device_count);
    cudaFree(device_offset);
    cudaFree(device_users);
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("CUDA first free error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&para_time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("\nParallel Search Time:%f microseconds", para_time);

    // ---- interactive queries -------------------------------------------------
    bool should_continue = true;
    while (should_continue) {
        int user_id1, user_id2;
        std::map<int, std::vector<int> >::iterator it;
        do {
            std::cout << "\n";
            std::cout << "Enter first user's ID: ";
            std::cin >> user_id1;
            it = unique_user.find(user_id1);
            if (it == unique_user.end()) std::cout << "User Not Exist\n";
        } while (it == unique_user.end());
        do {
            std::cout << "\n";
            std::cout << "Enter second user's ID: ";
            std::cin >> user_id2;
            it = unique_user.find(user_id2);
            if (it == unique_user.end()) std::cout << "User Not Exist\n";
        } while (it == unique_user.end());

        // Map the raw IDs back to dense indices (both exist: they came from
        // the same map that built users[]).
        int base_base = 0;
        int base_base_1 = 0;
        int base_found = 0;
        int base_found_1 = 0;
        for (int i = 0; i < num_node; i++) {
            if (users[i] == user_id1) { base_base = i; base_found = 1; }
            if (users[i] == user_id2) { base_base_1 = i; base_found_1 = 1; }
            if (base_found == 1 && base_found_1 == 1) break;
        }

        int *device_input2;
        int *device_output;
        int *device_size;
        int *found;
        int *found_host = new int;
        int *size = new int;
        cudaMalloc((void **) &device_level_content, num_node * sizeof(int));
        cudaMalloc((void **) &device_parent_content, num_node * sizeof(int));
        cudaMalloc((void **) &friend_list, num_node * sizeof(int));
        cudaMalloc((void **) &device_input2, sizeof(int));
        cudaMalloc((void **) &device_size, sizeof(int));
        cudaMalloc((void **) &device_output, search_depth * sizeof(int));
        cudaMalloc((void **) &found, sizeof(int));
        cudaMalloc((void **) &device_base, sizeof(int));
        error = cudaGetLastError();
        if (error != cudaSuccess) {
            printf("CUDA second malloc error: %s\n", cudaGetErrorString(error));
            exit(-1);
        }
        cudaMemset(device_output, 0, search_depth * sizeof(int));
        cudaMemset(device_size, 0, sizeof(int));
        cudaMemset(found, 0, sizeof(int));
        cudaMemcpy(device_input2, &base_base_1, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(device_level_content, host_level_content + base_base * num_node,
                   num_node * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(device_parent_content, host_parent_content + base_base * num_node,
                   num_node * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(friend_list, host_friend_list + base_base * num_node,
                   num_node * sizeof(int), cudaMemcpyHostToDevice);
        error = cudaGetLastError();
        if (error != cudaSuccess) {
            printf("CUDA second memset memcpy error: %s\n", cudaGetErrorString(error));
            exit(-1);
        }

        base = 0;
        count = limit / num_node;
        if (count > num_node) count = num_node;
        do {
            int base32 = (int) base;
            cudaMemcpy(device_base, &base32, sizeof(int), cudaMemcpyHostToDevice);
            find<<<dimGrid, dimBlock>>>(device_input2, friend_list, device_level_content,
                                        device_parent_content, device_num_node,
                                        device_output, device_size, found, device_base);
            cudaMemcpy(found_host, found, sizeof(int), cudaMemcpyDeviceToHost);
            base = base + count;
            if (base + count > num_node) count = num_node - base;
        } while (base < num_node && !(*found_host));
        error = cudaGetLastError();
        if (error != cudaSuccess) {
            printf("CUDA find error: %s\n", cudaGetErrorString(error));
            exit(-1);
        }
        cudaMemcpy(size, device_size, sizeof(int), cudaMemcpyDeviceToHost);
        // FIX: result was previously allocated as new int[*size] BEFORE
        // *size had been copied back from the device (uninitialized read).
        int *result = new int[*size];
        cudaMemcpy(result, device_output, (*size) * sizeof(int), cudaMemcpyDeviceToHost);
        error = cudaGetLastError();
        if (error != cudaSuccess) {
            printf("CUDA find error: %s\n", cudaGetErrorString(error));
            exit(-1);
        }
        cudaFree(device_input2);
        cudaFree(device_size);
        cudaFree(device_output);
        cudaFree(found);
        cudaFree(device_level_content);
        cudaFree(device_parent_content);
        cudaFree(device_base);
        cudaFree(friend_list); // FIX: previously leaked on every query

        if (*found_host == 0)
            std::cout << "\nUser " << user_id1 << " and User " << user_id2
                      << " are not connected within search level := " << search_depth << "\n\n";
        else {
            std::cout << "\nDegree of Separation between User " << user_id1
                      << " and User " << user_id2 << " is " << (*size) << "\n\n";
            for (int i = 0; i < (*size); i++) {
                std::cout << users[result[i]] << "<-";
            }
            std::cout << user_id1 << "\n";
        }
        std::cout << "\nDo you want to continue? 1 for yes and 0 for no: ";
        std::cin >> should_continue;
        delete[] result;
        delete size;
        delete found_host;
    }
    cudaFree(device_num_node); // FIX: was never freed
    delete[] offset;
    delete[] users;
    delete[] host_level_content;
    delete[] host_parent_content;
    delete[] host_friend_list;
    return 0;
}
21,203
#ifndef _PRESCAN_CU_
#define _PRESCAN_CU_

// includes, kernels
#include <assert.h>

#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define TILE_SIZE 1024
// You can use any other block size you wish.
#define BLOCK_SIZE 256

// Work-efficient (Blelloch) exclusive scan of one TILE_SIZE tile per block.
// Launch with TILE_SIZE/2 threads per block; each thread loads and stores
// two elements. If blockSums != NULL the tile's total is written to
// blockSums[blockIdx.x] before the root is cleared.
__global__ void prescanTile(float *out, const float *in, float *blockSums) {
    __shared__ float temp[TILE_SIZE];
    int tid = threadIdx.x;
    int base = blockIdx.x * TILE_SIZE;
    int offset = 1;

    temp[2 * tid]     = in[base + 2 * tid];
    temp[2 * tid + 1] = in[base + 2 * tid + 1];

    // Up-sweep (reduce) phase: build partial sums in place.
    for (int d = TILE_SIZE >> 1; d > 0; d >>= 1) {
        __syncthreads();
        if (tid < d) {
            int ai = offset * (2 * tid + 1) - 1;
            int bi = offset * (2 * tid + 2) - 1;
            temp[bi] += temp[ai];
        }
        offset <<= 1;
    }

    // Save the tile total and clear the root for the exclusive down-sweep.
    if (tid == 0) {
        if (blockSums) blockSums[blockIdx.x] = temp[TILE_SIZE - 1];
        temp[TILE_SIZE - 1] = 0.0f;
    }

    // Down-sweep phase: traverse back down, distributing prefixes.
    for (int d = 1; d < TILE_SIZE; d <<= 1) {
        offset >>= 1;
        __syncthreads();
        if (tid < d) {
            int ai = offset * (2 * tid + 1) - 1;
            int bi = offset * (2 * tid + 2) - 1;
            float t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();

    out[base + 2 * tid]     = temp[2 * tid];
    out[base + 2 * tid + 1] = temp[2 * tid + 1];
}

// Add each tile's scanned offset to every element of that tile.
// Launch with one block per tile and TILE_SIZE threads per block.
__global__ void uniformAdd(float *out, const float *tileOffsets) {
    out[blockIdx.x * TILE_SIZE + threadIdx.x] += tileOffsets[blockIdx.x];
}

// **===-------- Modify the body of this function -----------===**
// Exclusive prefix sum of inArray into outArray (device pointers).
// Note that the code has been modified to ensure numElements is a multiple
// of TILE_SIZE. Strategy: scan each tile independently, recursively scan the
// per-tile totals (zero-padded so the recursion keeps the precondition),
// then add each tile's prefix back in.
void prescanArray(float *outArray, float *inArray, int numElements) {
    int numTiles = numElements / TILE_SIZE;

    if (numTiles <= 1) {
        // Single tile: one block scan, no totals needed.
        prescanTile<<<1, TILE_SIZE / 2>>>(outArray, inArray, NULL);
        return;
    }

    // Pad the per-tile totals up to a multiple of TILE_SIZE so the recursive
    // call satisfies the same precondition; the padding scans as zeros.
    int paddedTiles = ((numTiles + TILE_SIZE - 1) / TILE_SIZE) * TILE_SIZE;
    float *tileSums = NULL;
    float *tileOffsets = NULL;
    cudaMalloc((void **) &tileSums, paddedTiles * sizeof(float));
    cudaMalloc((void **) &tileOffsets, paddedTiles * sizeof(float));
    cudaMemset(tileSums, 0, paddedTiles * sizeof(float));

    prescanTile<<<numTiles, TILE_SIZE / 2>>>(outArray, inArray, tileSums);

    // Exclusive scan of the tile totals gives each tile's global offset.
    prescanArray(tileOffsets, tileSums, paddedTiles);
    uniformAdd<<<numTiles, TILE_SIZE>>>(outArray, tileOffsets);

    cudaFree(tileSums);
    cudaFree(tileOffsets);
}
// **===-----------------------------------------------------------===**

#endif // _PRESCAN_CU_
21,204
/* ============================================================================
 Name        : add_vector_with_streams.cu
 Description : Compare element-wise vector addition run (a) in 100 independent
               CUDA streams, (b) in the default stream, and (c) as one long
               vector, timing each variant.
 ============================================================================ */
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <fstream>
#include <chrono>
#include <assert.h>
#define CUDA_API_PER_THREAD_DEFAULT_STREAM
#include <cuda.h>
#include <cuda_runtime.h>

// Print-and-assert wrapper around cudaError_t returns.
inline cudaError_t checkCUDA(cudaError_t result) {
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA Runtime error: %s\n", cudaGetErrorString(result));
        assert(result == cudaSuccess);
    }
    return result;
}

// Grid-stride element-wise addition c = a + b over N floats.
__global__ void addVector(float *a, float *b, float *c, int N) {
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride) {
        c[i] = a[i] + b[i];
    }
}

// Grid-stride initialisation a[i] = i over n floats.
__global__ void init(float *a, int n) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride) {
        a[i] = static_cast<float>(i);
    }
}

// 100 independent (init, init, add) pipelines, one stream each.
// Buffer layout: a_i at data[i*N], b_i at data[i*N + 100N], c_i at
// data[i*N + 200N].
// FIXES: streams are now destroyed and the managed buffer is freed; the old
// version leaked all 100 streams and relied on a heavy-handed mid-program
// cudaDeviceReset() that tore down the context later benchmarks reuse.
void addWithStreams() {
    std::chrono::system_clock::time_point start;
    std::chrono::system_clock::time_point stop;
    std::chrono::duration<double> elapsed_time;

    int deviceId;
    int numberOfSMs;
    checkCUDA(cudaGetDevice(&deviceId));
    checkCUDA(cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId));

    const int N = 2 << 16;
    size_t big = N * 300 * sizeof(float); // 100 x (a, b, c) regions

    float *data;
    checkCUDA(cudaMallocManaged(&data, big));
    checkCUDA(cudaMemPrefetchAsync(data, big, deviceId));

    cudaStream_t streams[100];
    start = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < 100; i++) {
        size_t threadsPerBlock = 256;
        size_t numberOfBlocks = 32;
        checkCUDA(cudaStreamCreate(&streams[i]));
        init<<<numberOfBlocks, threadsPerBlock, 0, streams[i]>>>(&data[i * N], N);
        init<<<numberOfBlocks, threadsPerBlock, 0, streams[i]>>>(&data[i * N + N * 100], N);
        addVector<<<numberOfBlocks, threadsPerBlock, 0, streams[i]>>>(
            &data[i * N], &data[i * N + N * 100], &data[i * N + N * 200], N);
        checkCUDA(cudaGetLastError());
    }
    checkCUDA(cudaDeviceSynchronize());
    stop = std::chrono::high_resolution_clock::now();
    elapsed_time = stop - start;
    std::cout << "with: " << elapsed_time.count() << std::endl;

    for (int i = 0; i < 100; i++) {
        checkCUDA(cudaStreamDestroy(streams[i]));
    }
    checkCUDA(cudaFree(data));
}

// Same 100 pipelines issued to the default stream, as the serialized baseline.
// FIX: the per-iteration cudaDeviceSynchronize() between init and addVector
// was removed — the default stream already orders those launches, and the
// extra sync only biased the timing against this variant.
void addWithoutStreams() {
    std::chrono::system_clock::time_point start;
    std::chrono::system_clock::time_point stop;
    std::chrono::duration<double> elapsed_time;

    int deviceId;
    int numberOfSMs;
    checkCUDA(cudaGetDevice(&deviceId));
    checkCUDA(cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId));

    const int N = 2 << 16;
    size_t big = N * 300 * sizeof(float);

    float *data;
    checkCUDA(cudaMallocManaged(&data, big));
    checkCUDA(cudaMemPrefetchAsync(data, big, deviceId));

    start = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < 100; i++) {
        size_t threadsPerBlock = 256;
        size_t numberOfBlocks = numberOfSMs * 32;
        init<<<numberOfBlocks, threadsPerBlock>>>(&data[i * N], N);
        init<<<numberOfBlocks, threadsPerBlock>>>(&data[i * N + N * 100], N);
        addVector<<<numberOfBlocks, threadsPerBlock>>>(
            &data[i * N], &data[i * N + N * 100], &data[i * N + N * 200], N);
        checkCUDA(cudaGetLastError());
    }
    checkCUDA(cudaDeviceSynchronize());
    stop = std::chrono::high_resolution_clock::now();
    elapsed_time = stop - start;
    std::cout << "without: " << elapsed_time.count() << std::endl;
    checkCUDA(cudaFree(data));
}

// The same total work done as a single 100*N-element vector addition.
// FIX: addVector was previously launched with length N, so only 1% of the
// initialised data was actually summed, making the timing incomparable.
void addOneLongVector() {
    std::chrono::system_clock::time_point start;
    std::chrono::system_clock::time_point stop;
    std::chrono::duration<double> elapsed_time;

    int deviceId;
    int numberOfSMs;
    checkCUDA(cudaGetDevice(&deviceId));
    checkCUDA(cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId));

    const int N = 2 << 16;
    size_t big = N * 300 * sizeof(float);

    float *data;
    checkCUDA(cudaMallocManaged(&data, big));
    checkCUDA(cudaMemPrefetchAsync(data, big, deviceId));

    start = std::chrono::high_resolution_clock::now();
    size_t threadsPerBlock = 256;
    size_t numberOfBlocks = numberOfSMs * 32;
    init<<<numberOfBlocks, threadsPerBlock>>>(data, N * 100);
    init<<<numberOfBlocks, threadsPerBlock>>>(&data[N * 100], N * 100);
    addVector<<<numberOfBlocks, threadsPerBlock>>>(data, &data[N * 100], &data[N * 2 * 100], N * 100);
    checkCUDA(cudaGetLastError());
    checkCUDA(cudaDeviceSynchronize());
    stop = std::chrono::high_resolution_clock::now();
    elapsed_time = stop - start;
    std::cout << "one long: " << elapsed_time.count() << std::endl;
    checkCUDA(cudaFree(data));
}

int main() {
    std::cout << "Work" << std::endl;
    addWithStreams();
    addWithoutStreams();
    addOneLongVector();
    std::cout << "End" << std::endl;
}
21,205
#include <iostream>
#include <cmath>
#include <chrono>
#include <random>
#include <limits>
#include <vector>   // FIX: std::vector was used without being included
#include <climits>  // FIX: ULONG_MAX was used without being included
#include <cuda.h>

typedef std::chrono::high_resolution_clock Clock;

#define NUM_TEST 10000000
#define NUM_BLOCKS 1
#define NUM_THREADS 256
#define K 100

using namespace std;

// (a * b) mod n without 64-bit overflow, via a 128-bit intermediate.
// FIX: the original computed (x * x) % n directly in 64 bits, which
// overflows for the 64-bit candidates this test draws.
__device__ unsigned long long modmul(unsigned long long a, unsigned long long b,
                                     unsigned long long n) {
    return (unsigned long long) (((unsigned __int128) a * b) % n);
}

// Helper function for modular exponentiation.
// Returns a^e (mod n) by square-and-multiply.
__device__ unsigned long long modexp(unsigned long long a, unsigned long long e,
                                     unsigned long long n) {
    unsigned long long res = 1;
    a = a % n; // reduce the base first (if a > n)
    while (e > 0) {
        if (e & 1)                 // odd exponent: fold in one factor of a
            res = modmul(res, a, n);
        e = e >> 1;                // shift right one (divide by 2)
        a = modmul(a, a, n);       // square the base
    }
    return res;
}

// Called each iteration of the witness loop; d is the odd part of n-1.
// Returns false if the chosen base proves n composite, true if n is
// probably prime for this witness.
__device__ bool witnessTest(unsigned long long d, unsigned long long n, float random_num) {
    // Pick a pseudo-random base in [2 .. n-2]; callers guarantee n > 4.
    unsigned long long a = random_num * (n - 4) + 2;
    unsigned long long x = modexp(a, d, n);
    if (x == 1ULL || x == n - 1)
        return true;
    // Square x up to r-1 more times (n - 1 = d * 2^r).
    while (d != n - 1) {
        x = modmul(x, x, n);
        d *= 2ULL;
        if (x == 1ULL) return false;
        if (x == n - 1) return true;
    }
    return false; // composite
}

// See: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
// One candidate per thread; k witness rounds give accuracy 1 - 4^-k.
// FIXES vs. the original:
//  * candidates were truncated through `int n` before testing;
//  * n = 0 and n = 1 were reported prime;
//  * the witness loop indexed random_nums[k] (one past the end) instead of
//    random_nums[i], so every round used the same out-of-bounds value.
__global__ void millerRabinPrimalityTest(unsigned long long *nums, unsigned len,
                                         bool *isPrime, unsigned long long k,
                                         float *random_nums) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= len)
        return;
    unsigned long long n = nums[idx];
    if (n < 2ULL || n == 4ULL) { isPrime[idx] = false; return; }
    if (n <= 3ULL)             { isPrime[idx] = true;  return; }
    // Strip factors of two: n - 1 = d * 2^r with d odd.
    unsigned long long d = n - 1;
    while (d % 2 == 0ULL) {
        d /= 2ULL;
    }
    // Witness loop, repeated k times with distinct random bases.
    for (unsigned long long i = 0; i < k; i++) {
        if (!witnessTest(d, n, random_nums[i])) {
            isPrime[idx] = false;
            return;
        }
    }
    isPrime[idx] = true;
}

// Batch test of NUM_TEST random 64-bit candidates on the GPU.
void random_test() {
    random_device rd;
    mt19937_64 eng(rd());
    uniform_int_distribution<unsigned long> distr;

    // Pre-drawn uniform [0, 1) values: one per witness round.
    float *random_nums = new float[K];
    for (int i = 0; i < K; i++) {
        random_nums[i] = (float) distr(eng) / (ULONG_MAX);
    }

    cout << "Starting Miller-Rabin CUDA test for " << NUM_TEST
         << " numbers with parameter k = " << K
         << ". Tests primality with accuracy " << (1 - (1 / pow(4, K))) << "." << endl;
    auto begin = Clock::now();

    vector<unsigned long long> test;
    for (int i = 0; i < NUM_TEST; i++) {
        test.push_back(distr(eng));
    }

    unsigned long long *d_nums;
    bool *d_isPrime;
    float *d_random_nums;
    bool *isPrime = new bool[NUM_TEST];
    cudaMalloc((void **) &d_random_nums, K * sizeof(float));
    cudaMalloc((void **) &d_isPrime, NUM_TEST * sizeof(bool));
    cudaMalloc((void **) &d_nums, NUM_TEST * sizeof(unsigned long long));
    cudaMemcpy((void *) d_nums, test.data(), NUM_TEST * sizeof(unsigned long long),
               cudaMemcpyHostToDevice);
    cudaMemcpy((void *) d_random_nums, random_nums, K * sizeof(float),
               cudaMemcpyHostToDevice);

    millerRabinPrimalityTest<<<(NUM_TEST + NUM_THREADS - 1) / NUM_THREADS, NUM_THREADS>>>(
        d_nums, test.size(), d_isPrime, K, d_random_nums);

    // Blocking copy: synchronizes with the kernel before we stop the clock.
    cudaMemcpy(isPrime, d_isPrime, NUM_TEST * sizeof(bool), cudaMemcpyDeviceToHost);

    auto end = Clock::now();
    auto totalDuration = chrono::duration_cast<chrono::nanoseconds>(end - begin).count();
    auto avgDuration = ((double) totalDuration) / NUM_TEST;
    cout << "Total Time: " << totalDuration << " nanoseconds" << endl;
    cout << "Average Time per iteration: " << avgDuration << " nanoseconds" << endl;

    cudaFree(d_isPrime);
    cudaFree(d_nums);
    cudaFree(d_random_nums);
    delete[] isPrime;
    delete[] random_nums; // FIX: previously leaked
}

// Small verbose run that prints each candidate's verdict.
void single_test() {
    random_device rd;
    mt19937_64 eng(rd());
    uniform_int_distribution<unsigned long> distr;

    float *random_nums = new float[K];
    for (int i = 0; i < K; i++) {
        random_nums[i] = (float) distr(eng) / (ULONG_MAX);
    }

    int numTest = 10;
    cout << "Starting Miller-Rabin CUDA test for " << numTest
         << " numbers with parameter k = " << K
         << ". Tests primality with accuracy " << (1 - (1 / pow(4, K))) << "." << endl;
    auto begin = Clock::now();

    vector<unsigned long long> test;
    for (int i = 0; i < numTest; i++) {
        test.push_back(distr(eng));
    }

    unsigned long long *d_nums;
    bool *d_isPrime;
    float *d_random_nums;
    bool *isPrime = new bool[numTest];
    cudaMalloc((void **) &d_random_nums, K * sizeof(float));
    cudaMalloc((void **) &d_isPrime, numTest * sizeof(bool));
    cudaMalloc((void **) &d_nums, numTest * sizeof(unsigned long long));
    cudaMemcpy((void *) d_nums, test.data(), numTest * sizeof(unsigned long long),
               cudaMemcpyHostToDevice);
    cudaMemcpy((void *) d_random_nums, random_nums, K * sizeof(float),
               cudaMemcpyHostToDevice);

    millerRabinPrimalityTest<<<(numTest + NUM_THREADS - 1) / NUM_THREADS, NUM_THREADS>>>(
        d_nums, test.size(), d_isPrime, K, d_random_nums);

    cudaMemcpy(isPrime, d_isPrime, numTest * sizeof(bool), cudaMemcpyDeviceToHost);
    auto end = Clock::now();

    for (int i = 0; i < numTest; i++) {
        cout << test[i] << " is prime: " << isPrime[i] << endl;
    }

    auto totalDuration = chrono::duration_cast<chrono::nanoseconds>(end - begin).count();
    auto avgDuration = ((double) totalDuration) / numTest;
    cout << "Total Time: " << totalDuration << " nanoseconds" << endl;
    cout << "Average Time per iteration: " << avgDuration << " nanoseconds" << endl;

    cudaFree(d_isPrime);
    cudaFree(d_nums);
    cudaFree(d_random_nums);
    delete[] isPrime;
    delete[] random_nums; // FIX: previously leaked
}

int main(int argc, char const *argv[]) {
    random_test();
    return 0;
}
21,206
#include <algorithm>
#include <cstdio>
#include <math.h>
#include <utility>
#include <vector>
#include <ctime>
#include <stdexcept>
#include <random>
#include <curand_kernel.h>
#include <chrono>
// for file writing
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <unistd.h>

using namespace std;

// global ID counter: each formed group takes the next contiguous ID range
unsigned int global_id = 0;
// [step] total simulation length; assigned in simulate() before GroupMetadata buffers are built
unsigned int SIM_TIME_IN_STEPS;
const int LEG_STEPS = 1;            // [step] number of full cycle steps
const double SIM_STEP = 0.025;      // [ms] simulation step
// stuff variables
const int neurons_in_group = 50;    // number of neurons in a group
const int neurons_in_ip = 196;      // number of neurons in a pool-type group (used for *_pool / *IP_* groups)

// contiguous ID range [id_start .. id_end] of one named neuron population
class Group {
public:
	Group() = default;
	string group_name;
	unsigned int id_start{};   // first neuron ID in the group
	unsigned int id_end{};     // the latest neuron ID in the group
	unsigned int group_size{}; // number of neurons in the group
};

// struct for human-readable initialization of connectomes
struct SynapseMetadata {
	unsigned int pre_id;        // pre neuron ID
	unsigned int post_id;       // post neuron ID
	unsigned int synapse_delay; // [step] synaptic delay of the synapse (axonal delay is included to this delay)
	float synapse_weight;       // [nS] synaptic weight. Interpreted as changing conductivity of neuron membrane

	SynapseMetadata(int pre_id, int post_id, float synapse_delay, float synapse_weight) {
		this->pre_id = pre_id;
		this->post_id = post_id;
		// convert [ms] to [step]; the extra +0.5 biases lround upward -- TODO confirm intended rounding
		this->synapse_delay = lround(synapse_delay * (1 / SIM_STEP) + 0.5);
		this->synapse_weight = synapse_weight;
	}
};

// struct for human-readable initialization of connectomes
struct GroupMetadata {
	Group group;
	float *g_exc;               // [nS] array of excitatory conductivity
	float *g_inh;               // [nS] array of inhibition conductivity
	float *voltage_array;       // [mV] array of membrane potential
	vector<float> spike_vector; // [ms] spike times

	explicit GroupMetadata(Group group) {
		this->group = move(group);
		// per-step recording buffers (one slot per simulation step)
		voltage_array = new float[SIM_TIME_IN_STEPS];
		g_exc = new float[SIM_TIME_IN_STEPS];
		g_inh = new float[SIM_TIME_IN_STEPS];
	}
};

// convert simulation time [ms] to a step index
__host__ unsigned int ms_to_step(float ms) { return (unsigned int) (ms / SIM_STEP); }

// convert a step index back to simulation time [ms]
__host__ float step_to_ms(int step) { return step * SIM_STEP; }

vector <GroupMetadata> all_groups;
vector <SynapseMetadata> all_synapses;

// form structs of neurons global ID and groups name
Group form_group(const string &group_name, int nrns_in_group = neurons_in_group) {
	Group group = Group();
	group.group_name = group_name;                // name of a neurons group
	group.id_start = global_id;                   // first ID in the group
	group.id_end = global_id + nrns_in_group - 1; // the latest ID in the group
	group.group_size = nrns_in_group;             // size of the neurons group
	all_groups.emplace_back(group);
	global_id += nrns_in_group;
	printf("Formed %s IDs [%d ... %d] = %d\n", group_name.c_str(), global_id - nrns_in_group, global_id - 1, nrns_in_group);
	return group;
}

// time derivative of the potassium gating variable n (returns 0 on NaN)
__device__ float dn(float V, float n) {
	float a = 0.032 * (15 - V) / (exp((15 - V) / 5) - 1);
	float b = 0.5 * exp((10 - V) / 40);
	b = a - (a + b) * n;
	if (b != b) return 0; // NaN guard (b != b is true only for NaN)
	return b;
}

// time derivative of the sodium gating variable h (returns 0 on NaN)
__device__ float dh(float V, float h) {
	float a = 0.128 * exp((17 - V) / 18);
	float b = 4 / (1 + exp((40 - V) / 5));
	b = a - (a + b) * h;
	if (b != b) return 0; // NaN guard
	return b;
}

// time derivative of the sodium gating variable m (returns 0 on NaN)
__device__ float dm(float V, float m) {
	float a = 0.32 * (13 - V) / (exp((13 - V) / 4) - 1);
	float b = 0.28 * (V - 40) / (exp((V - 40) / 5) - 1);
	b = a - (a + b) * m;
	if (b != b) return 0; // NaN guard
	return b;
}

// Integrates one SIM_STEP of the three-compartment (in/mid/out) Hodgkin-Huxley
// neuron model for all neurons, using a grid-stride loop over neuron IDs.
// Also applies EES / skin-stimulation spike injection and refractory handling.
// Several ID ranges (Ia afferents, motoneurons, muscles) are hard-coded below.
__global__ void neurons_kernel(float *V_extra,
                               float *V_in,
                               float *V_mid,
                               float *V_out,
                               float *h_in,
                               float *h_mid,
                               float *h_out,
                               float *m_in,
                               float *m_mid,
                               float *m_out,
                               float *n_in,
                               float *n_mid,
                               float *n_out,
                               const float *g_Na,
                               const float *g_K,
                               const float *g_L,
                               float *g_exc,
                               float *g_inh,
                               const double *const_coef1,
                               const double *const_coef2,
                               const double *const_coef3,
                               bool *has_spike,
                               const unsigned short *nrn_ref_time,
                               unsigned short *nrn_ref_time_timer,
                               const int neurons_number,
                               const short EES_activated,
                               const short CV_activated,
                               const bool C0_activated,
                               const bool C0_early_activated,
                               const unsigned int sim_iter,
                               const int decrease_lvl_Ia_spikes,
                               const double sim_step) {
	/// neuron parameters
	const float E_Na = 50.0;        // [mV] Reversal potential for the Sodium current
	const float E_K = -90.0;        // [mV] Reversal potential for the Potassium current
	const float E_L = -72.0;        // [mV] Reversal potential for the leak current
	const float E_ex = 50.0;        // [mV] Reversal potential for excitatory input
	const float E_in = -80.0;       // [mV] Reversal potential for inhibitory input
	const float tau_syn_exc = 0.3;  // [ms] Decay time of excitatory synaptic current (ms)
	const float tau_syn_inh = 2.0;  // [ms] Decay time of inhibitory synaptic current (ms)
	const float V_adj = -63.0;      // adjusts threshold to around -50 mV -65
	const float
g_bar = 15000;                  // [uS] the maximal possible conductivity

	float I_syn_exc, I_syn_inh;
	float I_K, I_Na, I_L, V_out_old, dV_mid;

	/// STRIDE neuron update (grid-stride loop over neuron IDs)
	for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < neurons_number; tid += blockDim.x * gridDim.x) {
		// reset spike flag of the current neuron before calculations
		has_spike[tid] = false;
		// Ia_E/F_aff IDs [1947 ... 2066] [2067 ... 2186] control spike number of Ia afferent by resetting neuron current
		if (1947 <= tid && tid <= 2186) {
			// reset current of 1/3 of neurons
			if (decrease_lvl_Ia_spikes == 1 && tid % 3 == 0) {
				g_exc[tid] = 0;
			} else {
				// reset current of 1/2 of neurons
				if (decrease_lvl_Ia_spikes == 2 && tid % 2 == 0) {
					g_exc[tid] = 0;
				}
			}
		}
		// generate spikes for EES (first 50 IDs form the EES group)
		if (tid < 50 && EES_activated)
			has_spike[tid] = true;
		// skin stimulations: one dedicated neuron per CV channel, firing every 4th step
		if (!C0_activated) {
			if (tid == 300 && CV_activated == 1 && (sim_iter % 4 == 0)) has_spike[tid] = true;
			if (tid == 301 && CV_activated == 2 && (sim_iter % 4 == 0)) has_spike[tid] = true;
			if (tid == 302 && CV_activated == 3 && (sim_iter % 4 == 0)) has_spike[tid] = true;
			if (tid == 303 && CV_activated == 4 && (sim_iter % 4 == 0)) has_spike[tid] = true;
			if (tid == 304 && CV_activated == 5 && (sim_iter % 4 == 0)) has_spike[tid] = true;
		}
		// increased barrier for muscles (IDs [3467 ... 52966]): clamp only above 500000
		if (3467 <= tid && tid <= 52966) {
			if (g_exc[tid] > 500000) g_exc[tid] = g_bar;
			if (g_inh[tid] > 500000) g_inh[tid] = g_bar;
		} else {
			if (g_exc[tid] > g_bar) g_exc[tid] = g_bar;
			if (g_inh[tid] > g_bar) g_inh[tid] = g_bar;
		}
		// MN (IDs [1557 ... 1946]): periodic depolarizing kick every 50 steps
		if (1557 <= tid && tid <= 1946 && sim_iter % 50 == 0) {
			V_in[tid] += 6;
		}
		// synaptic currents
		I_syn_exc = g_exc[tid] * (V_in[tid] - E_ex);
		I_syn_inh = g_inh[tid] * (V_in[tid] - E_in);
		V_out_old = V_out[tid];
		// muscle: excitatory reversal potential of 0 mV instead of E_ex
		if (3467 <= tid && tid <= 52966) {
			I_syn_exc = g_exc[tid] * (V_in[tid] - 0);
			I_syn_inh = g_inh[tid] * (V_in[tid] - E_in);
		}
		// if neuron in the refractory state -- ignore synaptic inputs. Re-calculate membrane potential
		if (nrn_ref_time_timer[tid] != 0) {//} || nrn_ref_time_timer[tid] + 10 > nrn_ref_time[tid]) {
			I_syn_exc = 0;
			I_syn_inh = 0;
		}
		// ionic currents of the input compartment
		I_K = g_K[tid] * n_in[tid] * n_in[tid] * n_in[tid] * n_in[tid] * (V_in[tid] - E_K);
		I_Na = g_Na[tid] * m_in[tid] * m_in[tid] * m_in[tid] * h_in[tid] * (V_in[tid] - E_Na);
		I_L = g_L[tid] * (V_in[tid] - E_L);
		V_in[tid] += const_coef1[tid] * (const_coef2[tid] * (2 * V_mid[tid] - 2 * V_in[tid]) - I_Na - I_K - I_L - I_syn_exc - I_syn_inh);
		if (V_in[tid] != V_in[tid]) V_in[tid] = -72; // NaN guard: reset to resting potential
		// ionic currents of the middle compartment
		I_K = g_K[tid] * n_mid[tid] * n_mid[tid] * n_mid[tid] * n_mid[tid] * (V_mid[tid] - E_K);
		I_Na = g_Na[tid] * m_mid[tid] * m_mid[tid] * m_mid[tid] * h_mid[tid] * (V_mid[tid] - E_Na);
		I_L = g_L[tid] * (V_mid[tid] - E_L);
		dV_mid = const_coef1[tid] * (const_coef2[tid] * (V_out[tid] - 2 * V_mid[tid] + V_in[tid]) - I_Na - I_K - I_L);
		V_mid[tid] += dV_mid;
		if (V_mid[tid] != V_mid[tid]) V_mid[tid] = -72; // NaN guard
		// extracellular potential derived from the middle compartment currents
		V_extra[tid] = const_coef3[tid] * (I_K + I_Na + I_L + const_coef1[tid] * dV_mid);
		// ionic currents of the output compartment
		I_K = g_K[tid] * n_out[tid] * n_out[tid] * n_out[tid] * n_out[tid] * (V_out[tid] - E_K);
		I_Na = g_Na[tid] * m_out[tid] * m_out[tid] * m_out[tid] * h_out[tid] * (V_out[tid] - E_Na);
		I_L = g_L[tid] * (V_out[tid] - E_L);
		V_out[tid] += const_coef1[tid] * (const_coef2[tid] * (2 * V_mid[tid] - 2 * V_out[tid]) - I_Na - I_K - I_L);
		if (V_out[tid] != V_out[tid]) V_out[tid] = -72; // NaN guard
		// use temporary dV variable as V_m with adjust
		/// transition rates between open and closed states of the potassium channels
		n_in[tid] += dn(V_in[tid] - V_adj, n_in[tid]) * sim_step;
		n_mid[tid] += dn(V_mid[tid] - V_adj, n_mid[tid]) * sim_step;
		n_out[tid] += dn(V_out[tid] - V_adj, n_out[tid]) * sim_step;
		m_in[tid] += dm(V_in[tid] - V_adj, m_in[tid]) * sim_step;
		m_mid[tid] += dm(V_mid[tid] - V_adj, m_mid[tid]) * sim_step;
		m_out[tid] += dm(V_out[tid] - V_adj, m_out[tid]) * sim_step;
		h_in[tid] += dh(V_in[tid] - V_adj, h_in[tid]) * sim_step;
		h_mid[tid] += dh(V_mid[tid] - V_adj,
h_mid[tid]) * sim_step;
		h_out[tid] += dh(V_out[tid] - V_adj, h_out[tid]) * sim_step;

		// re-calculate conductance (exponential decay towards zero)
		g_exc[tid] -= g_exc[tid] / tau_syn_exc * sim_step;
		g_inh[tid] -= g_inh[tid] / tau_syn_inh * sim_step;

		// threshold && not in refractory period (V_out_old > V_out detects the falling edge)
		if (nrn_ref_time_timer[tid] == 0 && V_out[tid] >= V_adj + 30.0 && V_out_old > V_out[tid]) {
			has_spike[tid] = true;                       // set spike state. It will be used in the "synapses_kernel"
			nrn_ref_time_timer[tid] = nrn_ref_time[tid]; // set the refractory period
		}
		// update the refractory period timer
		if (nrn_ref_time_timer[tid] > 0)
			nrn_ref_time_timer[tid]--;
	}
}

// Propagates spikes through synapses (grid-stride loop over synapse IDs):
// arms a delay countdown when the pre neuron spikes, and when the countdown
// reaches zero adds the synaptic weight to the post neuron's conductance
// via atomicAdd (several synapses may target the same post neuron).
__global__ void synapses_kernel(const bool *neuron_has_spike,     // array of bools -- is neuron has spike or not
                                float *neuron_g_exc,              // array of excitatory conductivity per neuron (changable)
                                float *neuron_g_inh,              // array of inhibitory conductivity per neuron (changable)
                                const int *synapses_pre_nrn_id,   // array of pre neurons ID per synapse
                                const int *synapses_post_nrn_id,  // array of post neurons ID per synapse
                                const int *synapses_delay,        // array of synaptic delay per synapse
                                int *synapses_delay_timer,        // array as above but changable
                                const float *synapses_weight,     // array of synaptic weight per synapse
                                const int syn_number) {           // number of synapses
	// ignore threads which ID is greater than neurons number
	for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < syn_number; tid += blockDim.x * gridDim.x) {
		// add synaptic delay if neuron has spike
		if (synapses_delay_timer[tid] == -1 && neuron_has_spike[synapses_pre_nrn_id[tid]]) {
			synapses_delay_timer[tid] = synapses_delay[tid];
		}
		// if synaptic delay is zero it means the time when synapse increase I by synaptic weight
		if (synapses_delay_timer[tid] == 0) {
			// post neuron ID = synapses_post_nrn_id[tid][syn_id], thread-safe (!)
if (synapses_weight[tid] >= 0) {
				atomicAdd(&neuron_g_exc[synapses_post_nrn_id[tid]], synapses_weight[tid]);
			} else {
				// remove negative sign (inhibitory conductance is stored positive)
				atomicAdd(&neuron_g_inh[synapses_post_nrn_id[tid]], -synapses_weight[tid]);
			}
			// make synapse timer a "free" for next spikes
			synapses_delay_timer[tid] = -1;
		}
		// update synapse delay timer
		if (synapses_delay_timer[tid] > 0) {
			synapses_delay_timer[tid]--;
		}
	}
}

// copy data from host to device
template<typename type>
void memcpyHtD(type *gpu, type *host, unsigned int size) {
	cudaMemcpy(gpu, host, sizeof(type) * size, cudaMemcpyHostToDevice);
}

// copy data from device to host
template<typename type>
void memcpyDtH(type *host, type *gpu, unsigned int size) {
	cudaMemcpy(host, gpu, sizeof(type) * size, cudaMemcpyDeviceToHost);
}

// get datasize of current variable type and its number
template<typename type>
unsigned int datasize(unsigned int size) {
	return sizeof(type) * size;
}

// fill array with current value
template<typename type>
void fill_array(type *array, unsigned int size, type value) {
	for (int i = 0; i < size; i++)
		array[i] = value;
}

// allocate a device array of `size` elements and copy `cpu_var` into it
template<typename type>
type *init_gpu_arr(type *cpu_var, int size) {
	type *gpu_var;
	cudaMalloc(&gpu_var, sizeof(type) * size);
	memcpyHtD<type>(gpu_var, cpu_var, size);
	return gpu_var;
}

// allocate a host array of `size` elements, all initialized to `val`
template<typename type>
type *init_cpu_arr(int size, type val) {
	type *array = new type[size];
	for (int i = 0; i < size; i++)
		array[i] = val;
	return array;
}

// allocate a host array filled with normally distributed values (mean, stddev)
template<typename type>
type *init_cpu_arr_normal(int size, type mean, type stddev) {
	random_device r;
	default_random_engine generator(r());
	normal_distribution<float> distr(mean, stddev);
	auto *array = new type[size];
	for (int i = 0; i < size; i++)
		array[i] = (type) distr(generator);
	return array;
}

// map treadmill speed [cm/s] to skin stimulation time [ms]
int get_skin_stim_time(int cms) {
	if (cms == 21)
		return 25;
	if (cms == 15)
		return 50;
	return 125;
}

// Assign bimodal (active/standby) diameters to motoneurons: 30% of each pool
// gets a small "active" diameter, the remaining 70% a larger "standby" one.
void bimodal_distr_for_moto_neurons(float *nrn_diameter) {
	int diameter_active = 27;
	int diameter_standby = 57;
	// MN_E [1557 ... 1766] 210  /  MN_F [1767 ... 1946] 180
	int MN_E_start = 1557;
	int MN_E_end = 1766;
	int MN_F_start = 1767;
	int MN_F_end = 1946;
	int nrn_number_extensor = MN_E_end - MN_E_start + 1;
	int nrn_number_flexor = MN_F_end - MN_F_start + 1;
	int standby_percent = 70;
	int standby_size_extensor = (int) (nrn_number_extensor * standby_percent / 100);
	int standby_size_flexor = (int) (nrn_number_flexor * standby_percent / 100);
	int active_size_extensor = nrn_number_extensor - standby_size_extensor;
	int active_size_flexor = nrn_number_flexor - standby_size_flexor;
	random_device r1;
	default_random_engine generator1(r1());
	normal_distribution<float> d_active(diameter_active, 3);
	normal_distribution<float> d_standby(diameter_standby, 6);
	for (int i = MN_E_start; i < MN_E_start + active_size_extensor; i++) {
		nrn_diameter[i] = d_active(generator1);
	}
	for (int i = MN_E_start + active_size_extensor; i <= MN_E_end; i++) {
		nrn_diameter[i] = d_standby(generator1);
	}
	for (int i = MN_F_start; i < MN_F_start + active_size_flexor; i++) {
		nrn_diameter[i] = d_active(generator1);
	}
	for (int i = MN_F_start + active_size_flexor; i <= MN_F_end; i++) {
		nrn_diameter[i] = d_standby(generator1);
	}
}

// Dump one group's recorded voltage, g_exc, g_inh (one line each, one value
// per simulation step) and spike times into <folder>/dat/<test>_<group>.dat
void save(int test_index, GroupMetadata &metadata, const string &folder) {
	ofstream file;
	string file_name = "/dat/" + to_string(test_index) + "_" + metadata.group.group_name + ".dat";
	file.open(folder + file_name);
	// save voltage
	for (unsigned int sim_iter = 0; sim_iter < SIM_TIME_IN_STEPS; sim_iter++)
		file << metadata.voltage_array[sim_iter] << " ";
	file << endl;
	// save g_exc
	for (unsigned int sim_iter = 0; sim_iter < SIM_TIME_IN_STEPS; sim_iter++)
		file << metadata.g_exc[sim_iter] << " ";
	file << endl;
	// save g_inh
	for (unsigned int sim_iter = 0; sim_iter < SIM_TIME_IN_STEPS; sim_iter++)
		file << metadata.g_inh[sim_iter] << " ";
	file << endl;
	// save spikes
	for (float const &value: metadata.spike_vector) {
		file << value << " ";
	}
	file.close();
	cout << "Saved to: " << folder + file_name << endl;
}

// Save recorded data for all groups (save_all != 0) or only MN_E/MN_F (save_all == 0)
void save_result(int test_index, int
save_all) {
	// getcwd(nullptr, 0) (POSIX.1-2008 / glibc) returns a malloc'd buffer the caller must free
	char *cwd_buf = getcwd(nullptr, 0);
	string current_path = cwd_buf ? cwd_buf : "";
	free(cwd_buf); // fix: this buffer was leaked in the original
	printf("[Test #%d] Save %s results to: %s \n", test_index, (save_all == 0) ? "MOTO" : "ALL", current_path.c_str());
	for (GroupMetadata &metadata : all_groups) {
		if (save_all == 0) {
			// MOTO-only mode: dump just the two motoneuron pools
			if (metadata.group.group_name == "MN_E")
				save(test_index, metadata, current_path);
			if (metadata.group.group_name == "MN_F")
				save(test_index, metadata, current_path);
		} else {
			save(test_index, metadata, current_path);
		}
	}
}

// Average V_m / g_exc / g_inh over the group's neurons into the per-step
// recording buffers and append spike times (shifted by +0.25 ms) for this step.
void copy_data_to(GroupMetadata &metadata,
                  const float *nrn_v_m,
                  const float *nrn_g_exc,
                  const float *nrn_g_inh,
                  const bool *nrn_has_spike,
                  const unsigned int sim_iter) {
	float nrn_mean_volt = 0;
	float nrn_mean_g_exc = 0;
	float nrn_mean_g_inh = 0;
	for (unsigned int tid = metadata.group.id_start; tid <= metadata.group.id_end; tid++) {
		nrn_mean_volt += nrn_v_m[tid];
		nrn_mean_g_exc += nrn_g_exc[tid];
		nrn_mean_g_inh += nrn_g_inh[tid];
		if (nrn_has_spike[tid]) {
			metadata.spike_vector.push_back(step_to_ms(sim_iter) + 0.25);
		}
	}
	metadata.g_exc[sim_iter] = nrn_mean_g_exc / metadata.group.group_size;
	metadata.g_inh[sim_iter] = nrn_mean_g_inh / metadata.group.group_size;
	metadata.voltage_array[sim_iter] = nrn_mean_volt / metadata.group.group_size;
}

// Connect every pre neuron to every post neuron (all-to-all) with normally
// distributed per-synapse delay (mean syn_delay) and weight (mean weight).
void connect_one_to_all(const Group &pre_neurons, const Group &post_neurons, float syn_delay, float weight) {
	// Seed with a real random value, if available
	random_device r;
	default_random_engine generator(r());
	normal_distribution<float> delay_distr(syn_delay, syn_delay / 5);
	normal_distribution<float> weight_distr(weight, weight / 10);
	for (unsigned int pre_id = pre_neurons.id_start; pre_id <= pre_neurons.id_end; pre_id++) {
		for (unsigned int post_id = post_neurons.id_start; post_id <= post_neurons.id_end; post_id++) {
			all_synapses.emplace_back(pre_id, post_id, delay_distr(generator), weight_distr(generator));
		}
	}
	printf("Connect %s to %s [one_to_all] (1:%d). Total: %d W=%.2f, D=%.1f\n",
	       pre_neurons.group_name.c_str(), post_neurons.group_name.c_str(),
	       post_neurons.group_size, pre_neurons.group_size * post_neurons.group_size, weight, syn_delay);
}

// Connect each pre neuron to `outdegree` uniformly chosen post neurons.
// outdegree == 0 picks a random outdegree in [30, 50]; no_distr disables the
// normal jitter on delay/weight. Delays are clamped to at least 0.1 ms.
void connect_fixed_outdegree(const Group &pre_neurons,
                             const Group &post_neurons,
                             float syn_delay,
                             float syn_weight,
                             int outdegree = 0,
                             bool no_distr = false) {
	// connect neurons with uniform distribution and normal distribution for syn delay and syn_weight
	random_device r;
	default_random_engine generator(r());
	uniform_int_distribution<int> id_distr(post_neurons.id_start, post_neurons.id_end);
	uniform_int_distribution<int> outdegree_num(30, 50);
	normal_distribution<float> delay_distr_gen(syn_delay, syn_delay / 3);
	normal_distribution<float> weight_distr_gen(syn_weight, syn_weight / 50);
	if (outdegree == 0)
		outdegree = outdegree_num(generator);
	int rand_post_id;
	float syn_delay_distr;
	float syn_weight_distr;
	for (unsigned int pre_id = pre_neurons.id_start; pre_id <= pre_neurons.id_end; pre_id++) {
		for (int i = 0; i < outdegree; i++) {
			rand_post_id = id_distr(generator);
			syn_delay_distr = delay_distr_gen(generator);
			if (syn_delay_distr < 0.1) {
				syn_delay_distr = 0.1; // keep delays physically meaningful
			}
			syn_weight_distr = weight_distr_gen(generator);
			if (no_distr) {
				all_synapses.emplace_back(pre_id, rand_post_id, syn_delay, syn_weight);
			} else {
				all_synapses.emplace_back(pre_id, rand_post_id, syn_delay_distr, syn_weight_distr);
			}
		}
	}
	printf("Connect %s to %s [fixed_outdegree] (1:%d). Total: %d W=%.2f, D=%.1f\n",
	       pre_neurons.group_name.c_str(), post_neurons.group_name.c_str(),
	       outdegree, pre_neurons.group_size * outdegree, syn_weight, syn_delay);
}

// Connect each post neuron to `indegree` uniformly chosen pre neurons.
// Mirror image of connect_fixed_outdegree (same defaults and clamping).
void connect_fixed_indegree(const Group &pre_neurons,
                            const Group &post_neurons,
                            float syn_delay,
                            float syn_weight,
                            int indegree = 0,
                            bool no_distr = false) {
	// connect neurons with uniform distribution and normal distribution for syn delay and syn_weight
	random_device r;
	default_random_engine generator(r());
	uniform_int_distribution<int> id_distr(pre_neurons.id_start, pre_neurons.id_end);
	uniform_int_distribution<int> indegree_num(30, 50);
	normal_distribution<float> delay_distr_gen(syn_delay, syn_delay / 3);
	normal_distribution<float> weight_distr_gen(syn_weight, syn_weight / 50);
	if (indegree == 0)
		indegree = indegree_num(generator);
	int rand_pre_id;
	float syn_delay_distr;
	float syn_weight_distr;
	for (unsigned int post_id = post_neurons.id_start; post_id <= post_neurons.id_end; post_id++) {
		for (int i = 0; i < indegree; i++) {
			rand_pre_id = id_distr(generator);
			syn_delay_distr = delay_distr_gen(generator);
			if (syn_delay_distr < 0.1) {
				syn_delay_distr = 0.1; // keep delays physically meaningful
			}
			syn_weight_distr = weight_distr_gen(generator);
			if (no_distr) {
				all_synapses.emplace_back(rand_pre_id, post_id, syn_delay, syn_weight);
			} else {
				all_synapses.emplace_back(rand_pre_id, post_id, syn_delay_distr, syn_weight_distr);
			}
		}
	}
	printf("Connect %s to %s [fixed_INdegree] (%d:1). Total: %d W=%.2f, D=%.1f\n",
	       pre_neurons.group_name.c_str(), post_neurons.group_name.c_str(),
	       indegree, post_neurons.group_size * indegree, syn_weight, syn_delay);
}

// Build all neuron groups and the complete connectome (IDs are assigned in
// declaration order, which the hard-coded ID ranges elsewhere depend on).
void init_network() {
	/// groups of neurons
	Group EES = form_group("EES");
	Group E1 = form_group("E1");
	Group E2 = form_group("E2");
	Group E3 = form_group("E3");
	Group E4 = form_group("E4");
	Group E5 = form_group("E5");
	Group CV1 = form_group("CV1", 1);
	Group CV2 = form_group("CV2", 1);
	Group CV3 = form_group("CV3", 1);
	Group CV4 = form_group("CV4", 1);
	Group CV5 = form_group("CV5", 1);
	Group CD4 = form_group("CD4", 1);
	Group CD5 = form_group("CD5", 1);
	Group OM1_0 = form_group("OM1_0");
	Group OM1_1 = form_group("OM1_1");
	Group OM1_2_E = form_group("OM1_2_E");
	Group OM1_2_F = form_group("OM1_2_F");
	Group OM1_3 = form_group("OM1_3");
	Group OM2_0 = form_group("OM2_0");
	Group OM2_1 = form_group("OM2_1");
	Group OM2_2_E = form_group("OM2_2_E");
	Group OM2_2_F = form_group("OM2_2_F");
	Group OM2_3 = form_group("OM2_3");
	Group OM3_0 = form_group("OM3_0");
	Group OM3_1 = form_group("OM3_1");
	Group OM3_2_E = form_group("OM3_2_E");
	Group OM3_2_F = form_group("OM3_2_F");
	Group OM3_3 = form_group("OM3_3");
	Group OM4_0 = form_group("OM4_0");
	Group OM4_1 = form_group("OM4_1");
	Group OM4_2_E = form_group("OM4_2_E");
	Group OM4_2_F = form_group("OM4_2_F");
	Group OM4_3 = form_group("OM4_3");
	Group OM5_0 = form_group("OM5_0");
	Group OM5_1 = form_group("OM5_1");
	Group OM5_2_E = form_group("OM5_2_E");
	Group OM5_2_F = form_group("OM5_2_F");
	Group OM5_3 = form_group("OM5_3");
	Group MN_E = form_group("MN_E", 210);
	Group MN_F = form_group("MN_F", 180);
	Group Ia_E_aff = form_group("Ia_E_aff", 120);
	Group Ia_F_aff = form_group("Ia_F_aff", 120);
	Group R_E = form_group("R_E");
	Group R_F = form_group("R_F");
	Group Ia_E_pool = form_group("Ia_E_pool", neurons_in_ip);
	Group Ia_F_pool = form_group("Ia_F_pool", neurons_in_ip);
	Group eIP_E_1 = form_group("eIP_E_1", 40);
	Group eIP_E_2 = form_group("eIP_E_2", 40);
	Group eIP_E_3 =
form_group("eIP_E_3", 40);
	Group eIP_E_4 = form_group("eIP_E_4", 40);
	Group eIP_E_5 = form_group("eIP_E_5", 40);
	Group eIP_F = form_group("eIP_F", neurons_in_ip);
	Group iIP_E = form_group("iIP_E", neurons_in_ip);
	Group iIP_F = form_group("iIP_F", neurons_in_ip);
	Group muscle_E = form_group("muscle_E", 150 * 210); // 150 * 210 = 31500
	Group muscle_F = form_group("muscle_F", 100 * 180); // 100 * 180 = 18000

	/// E1-5 ()
	connect_fixed_outdegree(EES, E1, 2, 1500);
	connect_fixed_outdegree(E1, E2, 2, 1500);
	connect_fixed_outdegree(E2, E3, 2, 1500);
	connect_fixed_outdegree(E3, E4, 2, 1500);
	connect_fixed_outdegree(E4, E5, 2, 1500);
	///
	connect_one_to_all(CV3, OM1_3, 0.1, 5100);
	connect_one_to_all(CV4, OM1_3, 0.1, 5100);
	connect_one_to_all(CV5, OM1_3, 0.1, 5100);
	connect_one_to_all(CV4, OM2_3, 0.1, 5100);
	connect_one_to_all(CV5, OM2_3, 0.1, 5100);
	connect_one_to_all(CV5, OM3_3, 0.1, 5100);
	connect_one_to_all(CV5, OM4_3, 0.1, 5100);
	connect_fixed_outdegree(OM1_2_E, eIP_E_1, 2, 400, neurons_in_ip);
	connect_fixed_outdegree(OM2_2_E, eIP_E_2, 4, 800, neurons_in_ip);
	connect_fixed_outdegree(OM3_2_E, eIP_E_3, 4, 800, neurons_in_ip);
	connect_fixed_outdegree(OM4_2_E, eIP_E_4, 4, 800, neurons_in_ip);
	connect_fixed_outdegree(OM5_2_E, eIP_E_5, 4, 800, neurons_in_ip);

	/// [1] level
	connect_fixed_outdegree(E1, OM1_0, 1, 500);
	// input from sensory
	connect_one_to_all(CV1, OM1_0, 0.1, 700);
	connect_one_to_all(CV2, OM1_0, 0.1, 700);
	// inner connectomes
	connect_fixed_outdegree(OM1_0, OM1_1, 0.1, 1300); // 1
	connect_fixed_outdegree(OM1_1, OM1_2_E, 1, 1000); // 2
	connect_fixed_outdegree(OM1_1, OM1_3, 3, 350);
	connect_fixed_outdegree(OM1_2_E, OM1_1, 2.5, 820);
	connect_fixed_outdegree(OM1_1, OM1_1, 2.5, 300);
	connect_fixed_outdegree(OM1_2_E, OM1_2_E, 2.5, 300);
	connect_fixed_outdegree(OM1_2_E, OM1_3, 3, 350);
	connect_fixed_outdegree(OM1_3, OM1_1, 3, -500);
	connect_fixed_outdegree(OM1_3, OM1_2_E, 3, -500);

	/// [2] level
	connect_fixed_outdegree(E2, OM2_0, 0.1, 500);
	// input from sensory
	connect_one_to_all(CV2, OM2_0, 0.1, 700);
	connect_one_to_all(CV3, OM2_0, 0.1, 700);
	// inner connectomes
	connect_fixed_outdegree(OM2_0, OM2_1, 0.1, 1300);
	connect_fixed_outdegree(OM2_1, OM2_2_E, 1, 1100);
	connect_fixed_outdegree(OM2_1, OM2_3, 3, 350);
	connect_fixed_outdegree(OM2_2_E, OM2_1, 2.5, 820);
	connect_fixed_outdegree(OM2_1, OM2_1, 2.5, 300);
	connect_fixed_outdegree(OM2_2_E, OM2_2_E, 2.5, 300);
	connect_fixed_outdegree(OM2_2_E, OM2_3, 3, 350);
	connect_fixed_outdegree(OM2_3, OM2_1, 3, -500);
	connect_fixed_outdegree(OM2_3, OM2_2_E, 3, -500);

	/// [3] level
	connect_fixed_outdegree(E3, OM3_0, 0.1, 700); // 400
	// input from sensory
	connect_one_to_all(CV3, OM3_0, 0.1, 650);
	connect_one_to_all(CV4, OM3_0, 0.1, 650);
	// inner connectomes
	connect_fixed_outdegree(OM3_0, OM3_1, 0.1, 1300);
	connect_fixed_outdegree(OM3_1, OM3_2_E, 1, 1100);
	connect_fixed_outdegree(OM3_1, OM3_3, 3, 350);
	connect_fixed_outdegree(OM3_2_E, OM3_1, 2.5, 820);
	connect_fixed_outdegree(OM3_1, OM3_1, 2.5, 300);
	connect_fixed_outdegree(OM3_2_E, OM3_2_E, 2.5, 300);
	connect_fixed_outdegree(OM3_2_E, OM3_3, 3, 350);
	connect_fixed_outdegree(OM3_3, OM3_1, 3, -500);
	connect_fixed_outdegree(OM3_3, OM3_2_E, 3, -500);

	/// [4] level
	connect_fixed_outdegree(E4, OM4_0, 0.1, 700);
	// input from sensory
	connect_one_to_all(CV4, OM4_0, 0.1, 650);
	connect_one_to_all(CV5, OM4_0, 0.1, 650);
	// inner connectomes
	connect_fixed_outdegree(OM4_0, OM4_1, 0.1, 1300);
	connect_fixed_outdegree(OM4_1, OM4_2_E, 1, 1000);
	connect_fixed_outdegree(OM4_1, OM4_3, 3, 330);
	connect_fixed_outdegree(OM4_2_E, OM4_1, 2.5, 820);
	connect_fixed_outdegree(OM4_1, OM4_1, 2.5, 320);
	connect_fixed_outdegree(OM4_2_E, OM4_2_E, 2.5, 320);
	connect_fixed_outdegree(OM4_2_E, OM4_3, 3, 350);
	connect_fixed_outdegree(OM4_3, OM4_1, 3, -500);
	connect_fixed_outdegree(OM4_3, OM4_2_E, 3, -500);

	/// [5] level
	connect_fixed_outdegree(E5, OM5_0, 0.1, 700);
	// input from sensory
	connect_one_to_all(CV5, OM5_0, 0.1, 700);
	// inner connectomes
	connect_fixed_outdegree(OM5_0, OM5_1, 0.1, 1300);
	connect_fixed_outdegree(OM5_1, OM5_2_E, 1, 1025);
	connect_fixed_outdegree(OM5_1, OM5_3, 3, 350);
	connect_fixed_outdegree(OM5_2_E, OM5_1, 2.5, 900);
	connect_fixed_outdegree(OM5_1, OM5_1, 2.5, 130);
	connect_fixed_outdegree(OM5_2_E, OM5_2_E, 2.5, 130);
	connect_fixed_outdegree(OM5_2_E, OM5_3, 3, 350);
	connect_fixed_outdegree(OM5_3, OM5_1, 3, -1000);
	connect_fixed_outdegree(OM5_3, OM5_2_E, 3, -1000);

	/// reflex arc
	connect_fixed_outdegree(iIP_E, eIP_F, 0.5, -1);
	connect_fixed_outdegree(iIP_F, eIP_E_1, 0.5, -1);
	connect_fixed_outdegree(iIP_F, eIP_E_2, 0.5, -1);
	connect_fixed_outdegree(iIP_F, eIP_E_3, 0.5, -1);
	connect_fixed_outdegree(iIP_F, eIP_E_4, 0.5, -1);
	connect_fixed_outdegree(iIP_F, eIP_E_5, 0.5, -1);
	connect_fixed_outdegree(iIP_E, OM1_2_F, 0.5, -0.5);
	connect_fixed_outdegree(iIP_E, OM2_2_F, 0.5, -0.5);
	connect_fixed_outdegree(iIP_E, OM3_2_F, 0.5, -0.5);
	connect_fixed_outdegree(iIP_E, OM4_2_F, 0.5, -0.5);
	connect_fixed_outdegree(EES, Ia_E_aff, 0.5, 5000);
	connect_fixed_outdegree(EES, Ia_F_aff, 0.5, 5000);
	connect_fixed_outdegree(eIP_E_1, eIP_E_1, 2, 250);
	connect_fixed_outdegree(eIP_E_2, eIP_E_2, 2, 250);
	connect_fixed_outdegree(eIP_E_3, eIP_E_3, 2, 250);
	connect_fixed_outdegree(eIP_E_4, eIP_E_4, 2, 250);
	connect_fixed_outdegree(eIP_E_5, eIP_E_5, 2, 250);
	connect_fixed_outdegree(eIP_E_1, MN_E, 1, 150, 200); // 250
	connect_fixed_outdegree(eIP_E_2, MN_E, 1, 200, 200); // 250
	connect_fixed_outdegree(eIP_E_3, MN_E, 1, 200, 200); // 250
	connect_fixed_outdegree(eIP_E_4, MN_E, 1, 200, 200); // 250
	connect_fixed_outdegree(eIP_E_5, MN_E, 1, 200, 200); // 250
	connect_fixed_outdegree(eIP_F, MN_F, 2, 350, neurons_in_ip); // 250
	connect_fixed_outdegree(iIP_E, Ia_E_pool, 1, 1);
	connect_fixed_outdegree(iIP_F, Ia_F_pool, 1, 1);
	connect_fixed_outdegree(Ia_E_pool, MN_F, 1, -1);
	connect_fixed_outdegree(Ia_E_pool, Ia_F_pool, 1, -1);
	connect_fixed_outdegree(Ia_F_pool, MN_E, 1, -1);
	connect_fixed_outdegree(Ia_F_pool, Ia_E_pool, 1, -1);
	connect_fixed_outdegree(Ia_E_aff, MN_E, 0.5, 1000, 200);
	connect_fixed_outdegree(Ia_F_aff, MN_F, 0.5, 1000, 200);
	connect_fixed_outdegree(MN_E, MN_E, 2.5, 350);
	connect_fixed_outdegree(MN_E, R_E, 2, 1);
	connect_fixed_outdegree(MN_F, R_F, 2, 1);
	connect_fixed_indegree(MN_E, muscle_E, 1, 1800, 50);
	connect_fixed_indegree(MN_F, muscle_F, 1, 1800, 50);
	connect_fixed_outdegree(R_E, MN_E, 2, -0.5);
	connect_fixed_outdegree(R_E, R_F, 2, -1);
	connect_fixed_outdegree(R_F, MN_F, 2, -0.5);
	connect_fixed_outdegree(R_F, R_E, 2, -1);
}

// Run the full simulation.
// cms: treadmill speed [cm/s] mapped to skin_stim_time via get_skin_stim_time();
// ees: EES stimulation frequency [Hz] (period = 1000 / ees ms);
// inh / ped / ht5: mode flags -- not used in the visible part, presumably consumed
// further down or by callers (TODO confirm); save_all: 0 = save MN_E/MN_F only;
// itest: test index used in output file names.
__host__ void simulate(int cms, int ees, int inh, int ped, int ht5, int save_all, int itest) {
	// init random distributions
	random_device r;
	default_random_engine generator(r());
	uniform_real_distribution<float> standard_uniform(0, 1);
	uniform_real_distribution<float> d_inter_distr(3, 8);
	uniform_real_distribution<float> d_Ia_aff_distr(10, 20);
	uniform_real_distribution<float> d_muscle_dist(20, 80);
	normal_distribution<double> c_m_dist(1, 0.05);
	normal_distribution<double> c_m_moto_dist(2, 0.06);
	normal_distribution<double> g_Na_dist(120, 3.7);
	normal_distribution<double> g_K_dist(36, 2.3);
	normal_distribution<double> g_L_dist(0.3, 0.033);
	normal_distribution<double> R_dist(100, 3.1);
	//
	const unsigned int skin_stim_time = get_skin_stim_time(cms);
	const unsigned int T_simulation = 11 * skin_stim_time * LEG_STEPS;
	// calculate how much steps in simulation time [steps]
	SIM_TIME_IN_STEPS = ms_to_step(T_simulation);
	// calculate spike frequency and C0/C1 activation time in steps
	auto ees_spike_each_step = ms_to_step(1000 / ees);
	auto steps_activation_C0 = ms_to_step(5 * skin_stim_time);
	auto steps_activation_C1 = ms_to_step(6 * skin_stim_time);

	/// init neuron groups and connectomes
	init_network();

	// get the number of bio objects
	const auto neurons_number = global_id;
	const auto synapses_number = static_cast<int>(all_synapses.size());

	/// CPU variables
	auto *nrn_v_extra = init_cpu_arr<float>(neurons_number, 0); // [mV] neuron extracellular membrane potential
	auto *nrn_v_m_in =
init_cpu_arr<float>(neurons_number, -72.5);                          // [mV] input neuron intracellular membrane potential
	auto *nrn_v_m_mid = init_cpu_arr<float>(neurons_number, -72.5);      // [mV] medial neuron intracellular membrane potential
	auto *nrn_v_m_out = init_cpu_arr<float>(neurons_number, -72.5);      // [mV] output neuron intracellular membrane potential
	auto *nrn_n_in = init_cpu_arr<float>(neurons_number, 0.01);          // [0..1] potassium channel activation probability
	auto *nrn_n_mid = init_cpu_arr<float>(neurons_number, 0.01);         // --//--
	auto *nrn_n_out = init_cpu_arr<float>(neurons_number, 0.01);         // --//--
	auto *nrn_h_in = init_cpu_arr<float>(neurons_number, 0.99);          // [0..1] sodium channel activation probability
	auto *nrn_h_mid = init_cpu_arr<float>(neurons_number, 0.99);         // --//--
	auto *nrn_h_out = init_cpu_arr<float>(neurons_number, 0.99);         // --//--
	auto *nrn_m_in = init_cpu_arr<float>(neurons_number, 0.01);          // [0..1] sodium channel inactivation probability
	auto *nrn_m_mid = init_cpu_arr<float>(neurons_number, 0.01);         // --//--
	auto *nrn_m_out = init_cpu_arr<float>(neurons_number, 0.01);         // --//--
	// fix: the two comments below were swapped in the original; the assignments further
	// down are const_coef1[i] = SIM_STEP / cm and const_coef2[i] = d / (4 * Ra * x * x)
	auto *const_coef1 = init_cpu_arr<double>(neurons_number, 0);         // dt / Cm
	auto *const_coef2 = init_cpu_arr<double>(neurons_number, 0);         // d / (4 * Ra * x * x)
	auto *const_coef3 = init_cpu_arr<double>(neurons_number, 0);         // extracellular constant
	auto *nrn_g_Na = init_cpu_arr<float>(neurons_number, 0);             // [nS]
	auto *nrn_g_K = init_cpu_arr<float>(neurons_number, 0);              // [nS]
	auto *nrn_g_L = init_cpu_arr<float>(neurons_number, 0);              // [nS]
	auto *nrn_g_exc = init_cpu_arr<float>(neurons_number, 0);            // [nS] excitatory synapse exponential conductance
	auto *nrn_g_inh = init_cpu_arr<float>(neurons_number, 0);            // [nS] inhibitory synapse exponential conductance
	auto *nrn_diameter = init_cpu_arr<float>(neurons_number, 0);         // [um] neuron diameter
	auto *nrn_has_spike = init_cpu_arr<bool>(neurons_number, false);     // neuron state - has spike or not
	auto *nrn_ref_time_timer = init_cpu_arr<unsigned short>(neurons_number, 0); // [step] neuron refractory time timer
	auto *nrn_ref_time = init_cpu_arr_normal<unsigned short>(neurons_number, 3 / SIM_STEP, 0.4 / SIM_STEP); // [step] neuron refractory time

	// synapse variables
	auto *synapses_pre_nrn_id = init_cpu_arr<int>(synapses_number, 0);   // Pre synaptic neuron's ID
	auto *synapses_post_nrn_id = init_cpu_arr<int>(synapses_number, 0);  // Post synaptic neuron's ID
	auto *synapses_weight = init_cpu_arr<float>(synapses_number, 0);     // Synaptic weight [mS]
	auto *synapses_delay = init_cpu_arr<int>(synapses_number, 0);        // Synaptic delay [ms] -> [steps]
	auto *synapses_delay_timer = init_cpu_arr<int>(synapses_number, -1); // Synaptic delay timer [steps]

	// CV timing
	const unsigned int beg_C_spiking[5] = {ms_to_step(0),
	                                       ms_to_step(skin_stim_time),
	                                       ms_to_step(2 * skin_stim_time),
	                                       ms_to_step(3 * skin_stim_time),
	                                       ms_to_step(5 * skin_stim_time)};
	const unsigned int end_C_spiking[5] = {ms_to_step(skin_stim_time - 0.1),
	                                       ms_to_step(2 * skin_stim_time - 0.1),
	                                       ms_to_step(3 * skin_stim_time - 0.1),
	                                       ms_to_step(5 * skin_stim_time - 0.1),
	                                       ms_to_step(6 * skin_stim_time - 0.1)};

	/// Fill the arrays
	// set by default inter neuron's diameter for all neurons
	for (int i = 0; i < neurons_number; i++)
		nrn_diameter[i] = d_inter_distr(generator);

	const double MICRO = pow(10, -6);
	const double CENTI = pow(10, -1);
	const double uF_m2 = pow(10, 4); // 1 microfarad per square centimeter = 10 000 microfarad per square meter
	const double mS_m2 = pow(10, 4); // 1 millisiemens per square centimeter = 10 000 millisiemens per square meter
	float Re = 333 * CENTI;          // convert [Ohm cm] to [Ohm m] Resistance of extracellular space

	// set for EES, E1, E2, E3, E4, E5 constant diameter
	// convert [um] to [m] - diameter
	for (int i = 0; i < 300; i++)
		nrn_diameter[i] = 5;

	// fill array of Ia_aff neuron's diameters
	// fix: the Ia afferent range is inclusive (IDs [1947 ... 2186], 240 afferents, matching
	// the kernel's `1947 <= tid && tid <= 2186`); the original `i < 2186` left the last
	// Ia_F_aff neuron with the default interneuron diameter
	for (int i = 1947; i <= 2186; i++)
		nrn_diameter[i] = d_Ia_aff_distr(generator);

	// set bimodal distribution for motoneurons
	bimodal_distr_for_moto_neurons(nrn_diameter);

	// set C_m, g_Na, g_K, g_L arrays based on the neuron's diameters
	double Ra, x, cm, d;
	for (int i = 0; i < neurons_number; i++) {
		// regular interneuron
		cm = c_m_dist(generator) * uF_m2;           // conductivity
		d = nrn_diameter[i] * MICRO;                // compartment diameter
		x = d / 3;                                  // compartment length
		nrn_g_Na[i] = g_Na_dist(generator) * mS_m2; // convert [mS / cm2] to [mS / m2]
		nrn_g_K[i] = g_K_dist(generator) * mS_m2;   // convert [mS / cm2] to [mS / m2]
		nrn_g_L[i] = g_L_dist(generator) * mS_m2;   // convert [mS / cm2] to [mS / m2]
		Ra = R_dist(generator) * CENTI;             // convert [Ohm cm] to [Ohm m]
		// motoneurons (NOTE(review): this range also covers all IDs up to and including
		// the muscles -- confirm it is intentional)
		if (1557 <= i && i <= 52966) {
			cm = c_m_moto_dist(generator) * uF_m2;
			Ra = R_dist(generator) * 2 * CENTI;
			x = d / 5; // 3
		}
		// muscles
		if (3467 <= i && i <= 52966) {
			// nrn_diameter[i] = d_muscle_dist(generator);
			// cm = c_m_dist(generator) * 3.6 * uF_m2;
			// nrn_g_Na[i] = 40 * mS_m2; // 10 // g_Na_dist(generator) / 12
			// nrn_g_K[i] = 6 * mS_m2; // 1
			// nrn_g_L[i] = 0.2 * mS_m2;
			// Ra = R_dist(generator) / 50 * CENTI;
			// d = nrn_diameter[i] * MICRO; // compartment diameter
			// x = d / 2; // compartment length
		}
		const_coef1[i] = SIM_STEP / cm;
		const_coef2[i] = d / (4 * Ra * x * x);
		// cout << i << "\tD=" << d << "\tCm=" << cm << "\tRa=" << Ra << "\tC1=" << const_coef1[i] << "\tC2=" << const_coef2[i] << "\n";
		x /= MICRO;
		d /= MICRO;
		const_coef3[i] = (log(sqrt(pow(x, 2) + pow(d, 2)) + x) - log(sqrt(pow(x, 2) + pow(d, 2)) - x)) / (4 * M_PI * x * Re);
	}

	// fill arrays of synapses
	unsigned int syn_id = 0;
	for (SynapseMetadata metadata : all_synapses) {
		synapses_pre_nrn_id[syn_id] = metadata.pre_id;
		synapses_post_nrn_id[syn_id] = metadata.post_id;
		synapses_delay[syn_id] = metadata.synapse_delay;
		synapses_weight[syn_id] = metadata.synapse_weight;
		syn_id++;
	}
	all_synapses.clear();

	// neuron variables
	auto *gpu_nrn_v_extra = init_gpu_arr<float>(nrn_v_extra, neurons_number);
	auto *gpu_nrn_v_m_in = init_gpu_arr<float>(nrn_v_m_in, neurons_number);
	auto *gpu_nrn_v_m_mid = init_gpu_arr<float>(nrn_v_m_mid, neurons_number);
	auto *gpu_nrn_v_m_out = init_gpu_arr<float>(nrn_v_m_out, neurons_number);
	auto *gpu_nrn_n_in = init_gpu_arr<float>(nrn_n_in, neurons_number);
	auto *gpu_nrn_n_mid = init_gpu_arr<float>(nrn_n_mid, neurons_number);
	auto *gpu_nrn_n_out = init_gpu_arr<float>(nrn_n_out, neurons_number);
	auto *gpu_nrn_h_in = init_gpu_arr<float>(nrn_h_in, neurons_number);
	auto *gpu_nrn_h_mid = init_gpu_arr<float>(nrn_h_mid, neurons_number);
	auto *gpu_nrn_h_out = init_gpu_arr<float>(nrn_h_out, neurons_number);
	auto *gpu_nrn_m_in = init_gpu_arr<float>(nrn_m_in, neurons_number);
	auto *gpu_nrn_m_mid = init_gpu_arr<float>(nrn_m_mid, neurons_number);
	auto *gpu_nrn_m_out = init_gpu_arr<float>(nrn_m_out, neurons_number);
	auto *gpu_nrn_g_Na = init_gpu_arr<float>(nrn_g_Na, neurons_number);
	auto *gpu_nrn_g_K = init_gpu_arr<float>(nrn_g_K, neurons_number);
	auto *gpu_nrn_g_L = init_gpu_arr<float>(nrn_g_L, neurons_number);
	auto *gpu_nrn_g_exc = init_gpu_arr<float>(nrn_g_exc, neurons_number);
	auto *gpu_nrn_g_inh = init_gpu_arr<float>(nrn_g_inh, neurons_number);
	auto *gpu_const_coef1 = init_gpu_arr<double>(const_coef1, neurons_number);
	auto *gpu_const_coef2 = init_gpu_arr<double>(const_coef2, neurons_number);
	auto *gpu_const_coef3 = init_gpu_arr<double>(const_coef3, neurons_number);
	auto *gpu_nrn_has_spike = init_gpu_arr<bool>(nrn_has_spike, neurons_number);
	auto *gpu_nrn_ref_time = init_gpu_arr<unsigned short>(nrn_ref_time, neurons_number);
	auto *gpu_nrn_ref_time_timer = init_gpu_arr<unsigned short>(nrn_ref_time_timer, neurons_number);

	// synapse variables
	auto *gpu_syn_pre_nrn_id = init_gpu_arr<int>(synapses_pre_nrn_id, synapses_number);
	auto *gpu_syn_post_nrn_id = init_gpu_arr<int>(synapses_post_nrn_id, synapses_number);
	auto *gpu_syn_weight = init_gpu_arr<float>(synapses_weight, synapses_number);
	auto *gpu_syn_delay = init_gpu_arr<int>(synapses_delay, synapses_number);
	auto *gpu_syn_delay_timer =
init_gpu_arr<int>(synapses_delay_timer, synapses_number); /// preparations for simulation float time; int local_iter = 0; bool C0_activated = false; bool C0_early_activated = false; short CV_activated; bool EES_activated; int shift_time_by_step = 0; int decrease_lvl_Ia_spikes; int shifted_iter_time = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // the main simulation loop for (unsigned int sim_iter = 0; sim_iter < SIM_TIME_IN_STEPS; sim_iter++) { CV_activated = 0; decrease_lvl_Ia_spikes = 0; EES_activated = (sim_iter % ees_spike_each_step == 0); // if flexor C0 activated, find the end of it and change to C1 if (C0_activated) { if (local_iter != 0 && local_iter % steps_activation_C0 == 0) { C0_activated = false; local_iter = 0; shift_time_by_step += steps_activation_C0; } if (local_iter != 0 && (local_iter + 400) % steps_activation_C0 == 0) C0_early_activated = false; // if extensor C1 activated, find the end of it and change to C0 } else { if (local_iter != 0 && local_iter % steps_activation_C1 == 0) { C0_activated = true; local_iter = 0; shift_time_by_step += steps_activation_C1; } if (local_iter != 0 && (local_iter + 400) % steps_activation_C1 == 0) C0_early_activated = true; } shifted_iter_time = sim_iter - shift_time_by_step; if ((beg_C_spiking[0] <= shifted_iter_time) && (shifted_iter_time < end_C_spiking[0])) CV_activated = 1; if ((beg_C_spiking[1] <= shifted_iter_time) && (shifted_iter_time < end_C_spiking[1])) CV_activated = 2; if ((beg_C_spiking[2] <= shifted_iter_time) && (shifted_iter_time < end_C_spiking[2])) CV_activated = 3; if ((beg_C_spiking[3] <= shifted_iter_time) && (shifted_iter_time < end_C_spiking[3])) CV_activated = 4; if ((beg_C_spiking[4] <= shifted_iter_time) && (shifted_iter_time < end_C_spiking[4])) CV_activated = 5; if (CV_activated == 1) decrease_lvl_Ia_spikes = 2; if (CV_activated == 2) decrease_lvl_Ia_spikes = 1; if (CV_activated == 3) decrease_lvl_Ia_spikes = 0; if 
(CV_activated == 4) decrease_lvl_Ia_spikes = 1; if (CV_activated == 5) decrease_lvl_Ia_spikes = 2; // update local iter (warning: can be resetted at C0/C1 activation) local_iter++; // invoke GPU kernel for neurons neurons_kernel<<<32, 128>>>(gpu_nrn_v_extra, gpu_nrn_v_m_in, gpu_nrn_v_m_mid, gpu_nrn_v_m_out, gpu_nrn_h_in, gpu_nrn_h_mid, gpu_nrn_h_out, gpu_nrn_m_in, gpu_nrn_m_mid, gpu_nrn_m_out, gpu_nrn_n_in, gpu_nrn_n_mid, gpu_nrn_n_out, gpu_nrn_g_Na, gpu_nrn_g_K, gpu_nrn_g_L, gpu_nrn_g_exc, gpu_nrn_g_inh, gpu_const_coef1, gpu_const_coef2, gpu_const_coef3, gpu_nrn_has_spike, gpu_nrn_ref_time, gpu_nrn_ref_time_timer, neurons_number, EES_activated, CV_activated, C0_activated, C0_early_activated, sim_iter, decrease_lvl_Ia_spikes, SIM_STEP); // copy data from GPU memcpyDtH<float>(nrn_v_m_mid, gpu_nrn_v_m_mid, neurons_number); memcpyDtH<float>(nrn_g_exc, gpu_nrn_g_exc, neurons_number); memcpyDtH<float>(nrn_g_inh, gpu_nrn_g_inh, neurons_number); memcpyDtH<float>(nrn_v_extra, gpu_nrn_v_extra, neurons_number); memcpyDtH<bool>(nrn_has_spike, gpu_nrn_has_spike, neurons_number); // fill records arrays for (GroupMetadata &metadata : all_groups) { if (save_all == 0) { if (metadata.group.group_name == "MN_E") copy_data_to(metadata, nrn_v_m_mid, nrn_g_exc, nrn_g_inh, nrn_has_spike, sim_iter); if (metadata.group.group_name == "MN_F") copy_data_to(metadata, nrn_v_m_mid, nrn_g_exc, nrn_g_inh, nrn_has_spike, sim_iter); } else { if (metadata.group.group_name == "MN_E") copy_data_to(metadata, nrn_v_m_mid, nrn_v_extra, nrn_v_extra, nrn_has_spike, sim_iter); else copy_data_to(metadata, nrn_v_m_mid, nrn_g_exc, nrn_g_inh, nrn_has_spike, sim_iter); } } // invoke GPU kernel for synapses synapses_kernel<<<32, 128>>>(gpu_nrn_has_spike, gpu_nrn_g_exc, gpu_nrn_g_inh, gpu_syn_pre_nrn_id, gpu_syn_post_nrn_id, gpu_syn_delay, gpu_syn_delay_timer, gpu_syn_weight, synapses_number); } /// end of the simulation iteration loop cudaEventRecord(stop, 0); cudaEventSynchronize(stop); 
// --- end of simulate(): timing readout and cleanup (function body begins before this chunk) ---
	cudaEventElapsedTime(&time, start, stop);  // elapsed GPU time between start/stop events, in ms
	printf("Time: %d \n", (int) time);
	cudaDeviceSynchronize(); // tell the CPU to halt further processing until the CUDA has finished doing its business
	cudaDeviceReset();       // remove all device allocations (destroy a CUDA context)
	save_result(itest, save_all);
}

// runner
int main(int argc, char *argv[]) {
	// NOTE(review): argv is ignored; all simulation parameters are hard-coded here — confirm intended
	simulate(21, 40, 100, 2, 0, 1, 0);
	return 0;
}
21,207
#include <cuda_runtime.h>

#define WARPS 2
#define WARP_SIZE 32
#define THREADS (WARPS * WARP_SIZE)

// File-scope shared memory: one slot per thread, plus one partial sum per warp.
__shared__ int smem_first[THREADS];
__shared__ int smem_second[WARPS];

// Two-level block reduction: each warp leader sums its warp's 32 slots into
// smem_second, then thread 0 folds the per-warp partials into *sum_out.
// Launch as a single block of THREADS threads.
__global__ void sumKernel(int *data_in, int *sum_out)
{
    int tx = threadIdx.x;

    smem_first[tx] = data_in[tx] + tx;

    // FIX: barrier before the warp leaders read slots written by OTHER
    // threads. Without it the leader may observe stale/unwritten values —
    // a data race, especially under Volta+ independent thread scheduling.
    __syncthreads();

    if (tx % WARP_SIZE == 0) {
        int wx = tx / WARP_SIZE;
        smem_second[wx] = 0;
        for (int i = 0; i < WARP_SIZE; ++i) {
            smem_second[wx] += smem_first[wx * WARP_SIZE + i];
        }
    }
    __syncthreads();  // per-warp partials must be visible to thread 0

    if (tx == 0) {
        *sum_out = 0;
        for (int i = 0; i < WARPS; ++i) {
            *sum_out += smem_second[i];
        }
    }
}

int main(int argc, char **argv)
{
    int *data_in = NULL;
    int *sum_out = NULL;

    cudaMalloc((void**)&data_in, sizeof(int) * THREADS);
    cudaMalloc((void**)&sum_out, sizeof(int));
    cudaMemset(data_in, 0, sizeof(int) * THREADS);

    sumKernel<<<1, THREADS>>>(data_in, sum_out);
    cudaDeviceSynchronize();

    cudaFree(data_in);
    cudaFree(sum_out);
    return 0;
}
21,208
#include <stdio.h>
#include <stdlib.h>

// Each thread scans its contiguous chunk of n elements starting at
// arr[threadIdx.x * n] and writes the chunk maximum to d_max[threadIdx.x].
__global__ void max_val(int* d_max, int* arr, int n)
{
    int base = threadIdx.x * n;
    int max = *(arr + base);
    printf("In thread %d\n", threadIdx.x);
    for (int i = base + 1; i < base + n; i++) {
        if (*(arr + i) > max) {
            max = *(arr + i);
        }
    }
    *(d_max + threadIdx.x) = max;
}

/*
 * stdin takes the following inputs:
 * - #threads
 * - Array size
 * - Array
 */
int main()
{
    int n;
    int* arr;
    int* max;
    int* d_arr;
    int* d_max;
    int threads;

    scanf("%d", &threads);
    scanf("%d", &n);

    arr = (int*) malloc(sizeof(int) * n);
    max = (int*) malloc(sizeof(int) * threads);
    for (int i = 0; i < n; i++) {
        scanf("%d", arr + i);
    }

    cudaMalloc((void**) &d_arr, sizeof(int) * n);
    cudaMalloc((void**) &d_max, sizeof(int) * threads);
    cudaMemcpy(d_arr, arr, sizeof(int) * n, cudaMemcpyHostToDevice);

    int chunk = n / threads;                 // elements handled per thread
    max_val<<<1, threads>>>(d_max, d_arr, chunk);
    cudaMemcpy(max, d_max, sizeof(int) * threads, cudaMemcpyDeviceToHost);

    int max_val = *(max);
    for (int i = 1; i < threads; i++) {
        if (max_val < *(max + i)) {
            max_val = *(max + i);
        }
    }
    // FIX: when threads does not divide n, the kernel never sees the last
    // n % threads elements; fold the remainder in on the host so the
    // reported maximum is correct for any n.
    for (int i = chunk * threads; i < n; i++) {
        if (max_val < arr[i]) {
            max_val = arr[i];
        }
    }
    printf("Maximum value is: %d\n", max_val);

    // FIX: release host and device memory (was leaked).
    free(arr);
    free(max);
    cudaFree(d_arr);
    cudaFree(d_max);
    return 0;
}
21,209
#include "includes.h"

// Work-efficient (Blelloch) exclusive prefix scan of one block's tile of
// 2 * blockDim.x elements, held in dynamically allocated shared memory.
// The tile total is written to deviceSum[blockIdx.x] so a later pass can add
// per-block offsets. Launch with sharedMem = 2 * blockDim.x * sizeof(unsigned int).
__global__ void preScan(unsigned int* deviceInput, unsigned int* deviceOutput, int cnt, unsigned int* deviceSum)
{
    extern __shared__ unsigned int temp[];
    int cntInB = blockDim.x * 2;                     // elements per block tile
    int idxInG = cntInB * blockIdx.x + threadIdx.x;  // global index of first element
    int idxInB = threadIdx.x;                        // index within the tile

    // FIX: the original zeroed temp[2*t] / temp[2*t+1] and then loaded into
    // temp[t] / temp[t+blockDim.x] with no barrier in between — different
    // threads wrote the same shared slots in an unordered way (write/write
    // race that could clobber loaded data). Each thread now writes ONLY its
    // own two slots, zero-padding out-of-range positions.
    temp[idxInB] = (idxInG < cnt) ? deviceInput[idxInG] : 0u;
    temp[idxInB + blockDim.x] = (idxInG + blockDim.x < cnt) ? deviceInput[idxInG + blockDim.x] : 0u;

    // Up-sweep (reduce) phase.
    int offset = 1;
    for (int d = cntInB >> 1; d > 0; d >>= 1) {
        __syncthreads();
        if (threadIdx.x < d) {
            int ai = offset - 1 + offset * (threadIdx.x * 2);
            int bi = ai + offset;
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }
    __syncthreads();

    // Before clearing the last element, save the tile total to deviceSum.
    if (threadIdx.x == 0) {
        deviceSum[blockIdx.x] = temp[cntInB - 1];
        temp[cntInB - 1] = 0;
    }

    // Down-sweep phase: propagate partial sums back down the tree.
    for (int d = 1; d < cntInB; d *= 2) {
        offset >>= 1;
        __syncthreads();
        if (threadIdx.x < d) {
            int ai = offset - 1 + offset * (threadIdx.x * 2);
            int bi = ai + offset;
            unsigned int be = temp[bi];
            temp[bi] += temp[ai];
            temp[ai] = be;
        }
    }
    // FIX: barrier before reading results — the last down-sweep iteration's
    // writes by other threads must be visible before the stores below.
    __syncthreads();

    if (idxInG < cnt) {
        deviceOutput[idxInG] = temp[idxInB];
    }
    if (idxInG + blockDim.x < cnt) {
        deviceOutput[idxInG + blockDim.x] = temp[idxInB + blockDim.x];
    }
}
21,210
#include "includes.h"

/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/

// Kernel to transform between the complex density-matrix layout and the real
// Pauli basis (up, x, y, down).
// To be run on a complete complex density matrix, once for each bit.
// This operation is its own inverse; `direction` selects which way to copy
// (0: complex -> real, otherwise real -> complex).
__global__ void pauli_reshuffle(double *complex_dm, double *real_dm, unsigned int no_qubits, unsigned int direction) {
    // (x, y): 2D element coordinates in the 2^no_qubits x 2^no_qubits matrix.
    const int x = (blockIdx.x *blockDim.x) + threadIdx.x;
    const int y = (blockIdx.y *blockDim.y) + threadIdx.y;
    if ((x >= (1 << no_qubits)) || (y >= (1 << no_qubits))) return;

    //do we need imaginary part? That is the case if we have an odd number of bits for y in our adress (bit in y is 1, bit in x is 0)
    unsigned int v = ~x & y;
    unsigned int py = 0;
    // py = popcount(~x & y), reduced mod 4 below. The low bit (parity)
    // chooses real vs imaginary part; py == 2 or 3 flips the sign.
    while (v) {
        py += v&1;
        v >>= 1;
    }
    py = py & 0x3;

    //short version: while (v>1) { v = (v >> 1) ^ v ;}
    //bit bang version
    /*v ^= v >> 1;*/
    /*v ^= v >> 2;*/
    /*v = (v & 0x11111111U) * 0x11111111U;*/
    /*v = (v >> 28) & 1;*/

    // Complex matrix stored as interleaved (re, im) doubles; (py & 1)
    // selects the imaginary component.
    const unsigned int addr_complex = (((x << no_qubits) | y) << 1) + (py&1);

    //the adress in pauli basis is obtained by interleaving the bits of x and y
    // (loop handles up to 16 bits per coordinate)
    unsigned int addr_real = 0;
    for (int i = 0; i < 16; i++) {
        addr_real |= (x & 1U << i) << i | (y & 1U << i) << (i + 1);
    }

    if(direction == 0) {
        real_dm[addr_real] = ((py==3 || py==2)? -1 : 1)*complex_dm[addr_complex];
    } else {
        complex_dm[addr_complex] = ((py==3 || py == 2)? -1 : 1)*real_dm[addr_real];
    }
}
21,211
// cudaHW.cu
//
// driver and kernel call

#include <stdio.h>

#define THREADS_PER_BLOCK 256

// Fills force/distance with synthetic values and writes their elementwise
// product into result. Bounds-guarded so any n and grid size are safe.
__global__ void vDotProd_d (int *force, int *distance, int *result, int n)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int i = n / 2;  // midpoint: force ramps up to it, then back down
    if (x < n) {
        if (x < i) {
            force[x] = x + 1;
        } else if (x > i) {
            force[x] = x - ((x - i) * 2);
        } else {
            force[x] = x;
        }
        distance[x] = x % 10 + 1;
        result[x] = force[x] * distance[x];
    }
}

// Host wrapper: allocates device buffers, launches the kernel and copies the
// products back into result_h (caller-allocated, arraySize ints).
extern "C" void gpuDotProd (int *result_h, int arraySize)
{
    int *force;
    int *distance;
    int *result;

    cudaMalloc ((void**) &force, sizeof(int) * arraySize);
    cudaMalloc ((void**) &distance, sizeof(int) * arraySize);
    cudaMalloc ((void**) &result, sizeof(int) * arraySize);

    // FIX: integer ceiling division instead of ceil((float)arraySize/...):
    // float has only 24 mantissa bits, so the old form could round the block
    // count for very large arraySize and leave a tail unprocessed.
    int blocks = (arraySize + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    vDotProd_d <<< blocks, THREADS_PER_BLOCK >>> (force, distance, result, arraySize);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf ("CUDA error: %s\n", cudaGetErrorString(err));

    cudaMemcpy (result_h, result, sizeof(int) * arraySize, cudaMemcpyDeviceToHost);

    cudaFree (force);
    cudaFree (distance);
    cudaFree (result);
}
21,212
#include<stdio.h>
#include<stdlib.h>
#include <stdint.h>
#include <math.h>   // ceil() used for the chunk-size computation below
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iomanip>
#include <iostream>

using namespace std;

// BMP file header (packed: matches the on-disk layout, no padding).
struct BITMAPFILEHEADER{
    uint8_t type[2];
    uint32_t size;
    uint16_t reserved1;
    uint16_t reserved2;
    uint32_t offsetbits;
} __attribute__ ((packed));

// BMP info header (packed).
struct BITMAPINFOHEADER{
    uint32_t size;
    uint32_t width;
    uint32_t height;
    uint16_t planes;
    uint16_t bitcount;
    uint32_t compression;
    uint32_t sizeimage;
    long xpelspermeter;
    long ypelspermeter;
    uint32_t colorsused;
    uint32_t colorsimportant;
} __attribute__ ((packed));

// 24-bit BMP pixel, stored blue-green-red on disk.
struct myRGB{
    uint8_t blue;
    uint8_t green;
    uint8_t red;
};

struct grayStruct{
    uint8_t B;
    uint8_t G;
    uint8_t R;
};

// Each thread converts a contiguous chunk of `chunksize` pixels to gray
// (equal 0.33 weight per channel, matching the original program).
__global__ void kernel(myRGB* input, grayStruct* gray, int w, int h, int chunksize)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int totalsize = w*h;
    if( idx < totalsize)
        for( int i = idx*chunksize ; i< idx*chunksize+chunksize && i < totalsize; i++)
        {
            int graypixel = (int)(0.33* input[i].red + 0.33* input[i].green + 0.33* input[i].blue);
            gray[i].R = graypixel; gray[i].G = graypixel; gray[i].B = graypixel;
        }
}

// Usage: prog <input.bmp> <num_blocks> <num_threads>
// NOTE(review): assumes a 24bpp uncompressed BMP whose rows need no 4-byte
// padding (i.e. width*3 divisible by 4) — confirm for arbitrary inputs.
int main(int argc, char* argv[])
{
    clock_t start = clock();
    BITMAPFILEHEADER source_head;
    BITMAPINFOHEADER source_info;
    FILE *in;
    FILE *out;

    // FIX: validate argument count before touching argv[1..3] (was a crash
    // when run with missing arguments).
    if (argc < 4) {
        printf("usage: %s <input.bmp> <blocks> <threads>\n", argv[0]);
        exit(-1);
    }

    if(!(in=fopen(argv[1],"rb")))
    {
        printf("\ncan not open file");
        exit(-1);
    }
    // FIX: output fopen was unchecked — a NULL stream would crash fwrite.
    if(!(out=fopen("out_cuda.bmp","wb")))
    {
        printf("\ncan not open output file");
        fclose(in);
        exit(-1);
    }

    fread(&source_head,sizeof(struct BITMAPFILEHEADER),1,in);
    fread(&source_info,sizeof(struct BITMAPINFOHEADER),1,in);

    int total_pixel = source_info.width * source_info.height;
    myRGB* h_pixel = (myRGB*) malloc (sizeof(myRGB)*total_pixel);
    fread(h_pixel,sizeof(myRGB),total_pixel,in);

    myRGB* d_pixel;
    cudaMalloc((void **)&d_pixel, total_pixel*sizeof(myRGB));
    cudaMemcpy(d_pixel, h_pixel,total_pixel*sizeof(myRGB), cudaMemcpyHostToDevice);

    grayStruct* d_gray;
    cudaMalloc( (void**) &d_gray, total_pixel*sizeof(grayStruct));
    cudaMemset(d_gray,255,total_pixel*sizeof(grayStruct));  // sentinel fill

    const int BLOCK_SIZE = atoi(argv[2]);
    const int THREAD_SIZE = atoi(argv[3]);
    int chunksize = ceil( (double)total_pixel/(BLOCK_SIZE*THREAD_SIZE));
    cout<< "Chunk Size: " << chunksize<< endl;

    kernel <<<BLOCK_SIZE ,THREAD_SIZE >>> (d_pixel, d_gray, source_info.width, source_info.height, chunksize);

    // Headers can be written while the GPU works; pixel data needs the sync.
    fwrite(&source_head,sizeof(struct BITMAPFILEHEADER),1,out);
    fwrite(&source_info,sizeof(struct BITMAPINFOHEADER),1,out);
    cudaDeviceSynchronize();

    grayStruct* h_gray = (grayStruct*) malloc (total_pixel*sizeof(grayStruct));
    cudaMemcpy(h_gray, d_gray, total_pixel*sizeof(grayStruct), cudaMemcpyDeviceToHost);
    fwrite(h_gray,sizeof(grayStruct),total_pixel,out);

    fclose(in);
    fclose(out);

    // FIX: release host and device buffers (were leaked).
    free(h_pixel);
    free(h_gray);
    cudaFree(d_pixel);
    cudaFree(d_gray);

    clock_t end = clock();
    double t_time = (end - start)/(double)CLOCKS_PER_SEC;
    cout << "Time: " << std::setprecision(9) << t_time << endl;
    return 0;
}
21,213
#include "includes.h"

// Packs the B, G and R channels of pixel `idx` into a six-float record
// starting at gc[5120 * 6], scaling each channel by 0.00390625 (= 1/256).
// Slots 3-5 of each record are left untouched; the position/alpha fields
// that once filled them were disabled. Parameter `a` is currently unused.
__global__ void add(int *a, int *r, int *g, int *b, float *gc)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int rec = 5120 * 6 + idx * 6;   // first slot of this pixel's record

    gc[rec    ] = b[idx] * 0.00390625;  // blue,  scaled to [0, 1)
    gc[rec + 1] = g[idx] * 0.00390625;  // green, scaled to [0, 1)
    gc[rec + 2] = r[idx] * 0.00390625;  // red,   scaled to [0, 1)
}
21,214
/* source /opt/cuda6/cuda6.5/cudavars source /opt/gcc/gccvars-4.8.4.sh #CARD="-gencode arch=compute_20,code=compute_20" #compatability back to Fermi (GTX 480); optimisation and immediate-launch for none (gives fastest compile times for development) CARD="-gencode arch=compute_20,code=compute_20 -gencode arch=compute_30,code=compute_30 -gencode arch=compute_30,code=sm_30 -gencode arch=compute_52,code=compute_52 -gencode arch=compute_52,code=sm_52" #compatability back to Fermi (GTX 480); optimisation and immediate-launch for GTX 680 (sm_30) and GTX 980 (sm_52) nvcc ${CARD} -O3 -ccbin g++ -m64 -std=c++11 -o testGPU testGPU.cu #./testGPU qsub testGPU-runs1.sge */ /* source /opt/cuda8/cuda8.0/cudavars source /opt/gcc/gccvars-4.8.4.sh #CARD="-gencode arch=compute_20,code=compute_20" #compatability back to Fermi (GTX 480); optimisation and immediate-launch for none (gives fastest compile times for development) CARD="-gencode arch=compute_20,code=compute_20 -gencode arch=compute_30,code=compute_30 -gencode arch=compute_30,code=sm_30 -gencode arch=compute_52,code=compute_52 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_61,code=compute_61 -gencode arch=compute_61,code=sm_61" #compatability back to Fermi (GTX 480); optimisation and immediate-launch for GTX 680 (sm_30), GTX 980 (sm_52) and GTX 1080 (sm_61) nvcc ${CARD} -O3 -ccbin g++ -m64 -std=c++11 -o testGPU testGPU.cu #./testGPU qsub testGPU-runs1.sge */ #include <chrono> #include <iostream> #include <iomanip> const int REP=2500; const int RUN_TEST=100; //const int RUN_TIMINGFUNC=10; const int RUN_TIMINGFUNC=2400; // est. 
~4 hours on GTX 980, ~8 hours on GTX 680 */

// Abort the program with a readable message on any CUDA runtime error.
void CUDACALL(cudaError err)
{
    if (err == cudaSuccess) return;
    std::cerr << "CUDA error: " << cudaGetErrorString(err) << std::endl;
    exit(EXIT_FAILURE);
}

// Per-element chain of REP multiply-add pairs => REP*2 FLOPs per thread.
__global__ void mykernel(float * __restrict__ a)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float r = a[idx];
    #pragma unroll
    for (int n = 0; n < REP; n++)
        r = 0.0001f + r*1.00002f;
    a[idx] = r;
}

// Runs the kernel RUN_TEST times and prints mean GFLOPS measured three ways:
// GPU events / CPU clock around just the kernel / CPU wall clock between calls.
void timingfunc(int grid_size, int block_size, float *d_a)
{
    float gpu_ms_mean = 0, gpu_ms; // time elapsed according to GPU
    float cl1_ms_mean = 0;         // CPU-side clock, kernel + minimal timing code only
    float cl2_ms_mean = 0;         // CPU-side clock, all code (sums to wall clock time)

    cudaEvent_t gpu_before, gpu_after;
    CUDACALL(cudaEventCreate(&gpu_before));
    CUDACALL(cudaEventCreate(&gpu_after));

    for (int run_test = 0; run_test < RUN_TEST; run_test++) {
        CUDACALL(cudaEventRecord(gpu_before, 0));
        std::chrono::steady_clock::time_point cl1_before = std::chrono::steady_clock::now();
        static auto cl2_before = cl1_before; // persists across calls: measures gap since previous iteration
        mykernel <<<grid_size, block_size>>> (d_a);
        CUDACALL(cudaEventRecord(gpu_after, 0));
        CUDACALL(cudaEventSynchronize(gpu_after));
        std::chrono::steady_clock::time_point cl_after = std::chrono::steady_clock::now();
        CUDACALL(cudaEventElapsedTime(&gpu_ms, gpu_before, gpu_after));
        std::chrono::duration<double, std::milli> cl1_time_span = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(cl_after - cl1_before);
        std::chrono::duration<double, std::milli> cl2_time_span = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(cl_after - cl2_before);
        cl2_before = cl_after;
        // FIX: divide by RUN_TEST instead of the hard-coded 100.0f so the
        // means remain correct if RUN_TEST is ever changed.
        gpu_ms_mean += gpu_ms/RUN_TEST;
        cl1_ms_mean += cl1_time_span.count()/RUN_TEST;
        cl2_ms_mean += cl2_time_span.count()/RUN_TEST;
    }
    CUDACALL(cudaEventDestroy(gpu_before));
    CUDACALL(cudaEventDestroy(gpu_after));

    //std::cout << REP*2 << " FLOP kernel";
    //std::cout << "; Mean " << gpu_ms_mean << " ms";
    long FLOPs = block_size*grid_size * (long)REP*2;
    float gpu_GFLOPS = FLOPs / (gpu_ms_mean*1000000);
    float cl1_GFLOPS = FLOPs / (cl1_ms_mean*1000000);
    float cl2_GFLOPS = FLOPs / (cl2_ms_mean*1000000);
    std::cout << std::fixed << std::setprecision(2);
    std::cout << gpu_GFLOPS << " / " << cl1_GFLOPS << " / " << cl2_GFLOPS << " GFLOPS" << std::endl;
}

int main()
{
    /*
    // GTX 480:
    int grid_size = 15*8;// *32;
    int block_size = 32*2;// *2;
    // GTX 680:
    int grid_size = 8*8;// *32;
    int block_size = 192*2;// *2;
    // GTX 980:
    int grid_size = 16*8;// *32;
    int block_size = 128*2;// *2;
    // GTX 1080:
    int grid_size = 20*8;// *32;
    int block_size = 128*2;// *2;
    // Should be good for all of the above:
    int grid_size = 240*8;// *32;
    int block_size = 384*2;// not *2 as max x-dimension of a block is 1024
    */
    // Should be good for all of the above:
    int grid_size = 240*8*32;
    int block_size = 384*2;// not *2 as max x-dimension of a block is 1024

    long num_bytes = block_size*grid_size * sizeof(float);
    float *h_a = (float*)malloc(num_bytes);
    if (h_a == 0) {std::cerr << "malloc error" << std::endl; exit(EXIT_FAILURE);}
    float *d_a;
    CUDACALL(cudaMalloc(&d_a, num_bytes));
    CUDACALL(cudaMemset(d_a, 0, num_bytes));
    for (int run_timingfunc=0;run_timingfunc<RUN_TIMINGFUNC;run_timingfunc++) {
        timingfunc(grid_size, block_size, d_a);
    }
    CUDACALL(cudaMemcpy(h_a, d_a, num_bytes, cudaMemcpyDeviceToHost));
    free(h_a);
    CUDACALL(cudaFree(d_a));
    return EXIT_SUCCESS;
}

/*
GTX 980 using CUDA 6.5:
[alastair@alastair12 ~]$ nvcc ${CARD} -O3 -ccbin g++ -m64 -std=c++11 -o testGPU testGPU.cu && ./testGPU
4833.60 / 4832.29 / 4832.10 GFLOPS
4833.98 / 4833.70 / 4833.46 GFLOPS
4833.46 / 4833.20 / 4832.97 GFLOPS
4833.51 / 4833.23 / 4833.03 GFLOPS
4833.25 / 4832.99 / 4832.79 GFLOPS
4833.21 / 4832.93 / 4832.73 GFLOPS
4799.90 / 4799.62 / 4799.43 GFLOPS
4766.02 / 4765.74 / 4765.54 GFLOPS
4765.83 / 4765.56 / 4765.36 GFLOPS
4766.19 / 4765.94 / 4765.74 GFLOPS
4764.84 / 4764.56 / 4764.36 GFLOPS
4765.12 / 4764.84 / 4764.63 GFLOPS
4762.39 / 4762.06 / 4761.81 GFLOPS
4764.42 / 4764.12 /
4763.89 GFLOPS 4764.18 / 4763.91 / 4763.69 GFLOPS 4763.87 / 4763.59 / 4763.37 GFLOPS 4764.10 / 4763.83 / 4763.62 GFLOPS 4776.78 / 4776.51 / 4776.32 GFLOPS 4785.27 / 4785.03 / 4784.85 GFLOPS 4786.23 / 4785.98 / 4785.80 GFLOPS 4787.08 / 4786.84 / 4786.66 GFLOPS 4786.52 / 4786.25 / 4786.07 GFLOPS 4786.08 / 4785.84 / 4785.66 GFLOPS 4787.20 / 4786.93 / 4786.73 GFLOPS 4787.47 / 4787.23 / 4787.04 GFLOPS 4786.56 / 4786.31 / 4786.12 GFLOPS 4787.72 / 4787.45 / 4787.27 GFLOPS 4788.01 / 4787.76 / 4787.57 GFLOPS 4788.78 / 4788.50 / 4788.32 GFLOPS 4788.31 / 4788.06 / 4787.88 GFLOPS 4788.25 / 4787.99 / 4787.81 GFLOPS 4788.22 / 4787.97 / 4787.79 GFLOPS 4788.17 / 4787.90 / 4787.72 GFLOPS 4789.13 / 4788.88 / 4788.70 GFLOPS 4788.52 / 4788.26 / 4788.08 GFLOPS 4786.58 / 4786.23 / 4786.01 GFLOPS 4788.91 / 4788.66 / 4788.47 GFLOPS 4788.97 / 4788.73 / 4788.55 GFLOPS 4789.84 / 4789.59 / 4789.41 GFLOPS 4789.99 / 4789.76 / 4789.58 GFLOPS ... */ /* My guess is that the higher than expected performance of a GTX 1080 card (below) is due to the use of 32-bit floating point multiply units in the Special Function Units (SFUs), in addition to the standard functional units ... perhaps giving an extra 4/32 = 12.5% theoretical performance. 
*/ /* GTX 1080 using CUDA 6.5 **with CARD not yet having optimisation and immediate-launch for GTX 1080**: [alastair@alastair12 ~]$ nvcc ${CARD} -O3 -ccbin g++ -m64 -std=c++11 -o testGPU testGPU.cu && ./testGPU 9309.61 / 9305.78 / 9304.97 GFLOPS 9414.90 / 9413.66 / 9412.66 GFLOPS 9424.44 / 9423.22 / 9422.39 GFLOPS 9424.54 / 9423.38 / 9422.56 GFLOPS 9423.89 / 9422.71 / 9421.87 GFLOPS 9413.76 / 9412.58 / 9411.74 GFLOPS 9410.10 / 9408.93 / 9408.08 GFLOPS 9409.76 / 9408.60 / 9407.76 GFLOPS 9410.21 / 9409.05 / 9408.17 GFLOPS 9418.93 / 9417.75 / 9416.91 GFLOPS 9417.13 / 9415.96 / 9415.12 GFLOPS 9418.63 / 9417.44 / 9416.60 GFLOPS 9416.86 / 9415.68 / 9414.81 GFLOPS 9338.27 / 9337.12 / 9336.29 GFLOPS 9338.01 / 9336.84 / 9336.04 GFLOPS 9336.50 / 9335.35 / 9334.54 GFLOPS 9335.29 / 9334.13 / 9333.33 GFLOPS 9344.54 / 9343.36 / 9342.53 GFLOPS 9349.33 / 9348.13 / 9347.29 GFLOPS 9315.99 / 9314.82 / 9314.00 GFLOPS 9291.60 / 9290.42 / 9289.62 GFLOPS 9291.45 / 9290.28 / 9289.47 GFLOPS 9290.18 / 9289.02 / 9288.23 GFLOPS 9289.46 / 9288.27 / 9287.47 GFLOPS 9288.10 / 9286.95 / 9286.15 GFLOPS 9288.66 / 9287.49 / 9286.69 GFLOPS 9290.92 / 9289.74 / 9288.93 GFLOPS 9292.05 / 9290.89 / 9290.08 GFLOPS 9280.50 / 9279.33 / 9278.53 GFLOPS 9239.80 / 9238.64 / 9237.85 GFLOPS 9238.17 / 9237.01 / 9236.22 GFLOPS 9238.42 / 9237.28 / 9236.50 GFLOPS 9239.73 / 9238.55 / 9237.73 GFLOPS 9238.66 / 9237.49 / 9236.68 GFLOPS 9238.32 / 9237.13 / 9236.34 GFLOPS 9238.38 / 9237.22 / 9236.43 GFLOPS 9239.39 / 9238.24 / 9237.43 GFLOPS 9239.75 / 9238.57 / 9237.78 GFLOPS 9239.99 / 9238.80 / 9238.01 GFLOPS 9239.70 / 9238.52 / 9237.75 GFLOPS ... 
9109.84 / 9108.70 / 9107.91 GFLOPS 9111.16 / 9109.99 / 9109.19 GFLOPS 9110.61 / 9109.46 / 9108.68 GFLOPS 9113.57 / 9112.40 / 9111.60 GFLOPS 9110.60 / 9109.46 / 9108.68 GFLOPS 9111.35 / 9110.20 / 9109.43 GFLOPS 9112.75 / 9111.61 / 9110.85 GFLOPS 9110.69 / 9109.56 / 9108.79 GFLOPS 9111.97 / 9110.86 / 9110.09 GFLOPS 9112.12 / 9111.00 / 9110.25 GFLOPS 9110.65 / 9109.53 / 9108.77 GFLOPS 9110.62 / 9109.49 / 9108.74 GFLOPS 9111.94 / 9110.83 / 9110.08 GFLOPS 9111.87 / 9110.75 / 9109.99 GFLOPS 9109.50 / 9108.39 / 9107.64 GFLOPS 9110.69 / 9109.56 / 9108.80 GFLOPS 9113.08 / 9111.95 / 9111.14 GFLOPS 9111.69 / 9110.57 / 9109.74 GFLOPS 9111.86 / 9110.74 / 9109.93 GFLOPS 9111.63 / 9110.52 / 9109.76 GFLOPS 9107.44 / 9106.36 / 9105.58 GFLOPS 9113.04 / 9111.94 / 9111.13 GFLOPS 9112.03 / 9110.93 / 9110.12 GFLOPS 9109.63 / 9108.52 / 9107.71 GFLOPS 9112.84 / 9111.76 / 9110.99 GFLOPS 9111.47 / 9110.40 / 9109.61 GFLOPS 9111.63 / 9110.55 / 9109.77 GFLOPS 9110.70 / 9109.60 / 9108.81 GFLOPS 9111.91 / 9110.82 / 9110.04 GFLOPS 9110.48 / 9109.38 / 9108.57 GFLOPS 9110.78 / 9109.71 / 9108.93 GFLOPS 9110.69 / 9109.61 / 9108.84 GFLOPS 9111.01 / 9109.95 / 9109.18 GFLOPS 9110.60 / 9109.53 / 9108.76 GFLOPS 9110.37 / 9109.29 / 9108.51 GFLOPS 9110.81 / 9109.72 / 9108.88 GFLOPS 9110.50 / 9109.42 / 9108.62 GFLOPS 9111.17 / 9110.08 / 9109.28 GFLOPS 9110.49 / 9109.43 / 9108.66 GFLOPS 9110.89 / 9109.83 / 9109.07 GFLOPS ... */ /* GTX 1080 using CUDA 8.0: [alastair@alastair12 ~]$ nvcc ${CARD} -O3 -ccbin g++ -m64 -std=c++11 -o testGPU testGPU.cu && ./testGPU nvcc warning : The 'compute_20', 'sm_20', and 'sm_21' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
9336.36 / 9332.39 / 9330.98 GFLOPS 9497.25 / 9495.93 / 9494.24 GFLOPS 9486.74 / 9485.43 / 9483.89 GFLOPS 9483.04 / 9481.70 / 9480.08 GFLOPS 9481.02 / 9479.70 / 9478.17 GFLOPS 9480.61 / 9479.31 / 9477.82 GFLOPS 9488.93 / 9487.62 / 9486.10 GFLOPS 9490.42 / 9489.09 / 9487.56 GFLOPS 9490.31 / 9488.97 / 9487.47 GFLOPS 9482.12 / 9480.78 / 9479.25 GFLOPS 9426.74 / 9425.43 / 9423.91 GFLOPS 9427.52 / 9426.19 / 9424.70 GFLOPS 9426.72 / 9425.43 / 9423.94 GFLOPS 9427.51 / 9426.21 / 9424.72 GFLOPS 9427.24 / 9425.92 / 9424.42 GFLOPS 9353.34 / 9352.06 / 9350.53 GFLOPS 9353.25 / 9351.96 / 9350.46 GFLOPS 9357.97 / 9356.67 / 9355.16 GFLOPS 9362.79 / 9361.47 / 9359.94 GFLOPS 9362.92 / 9361.63 / 9360.10 GFLOPS 9361.30 / 9359.98 / 9358.48 GFLOPS 9362.52 / 9361.25 / 9359.79 GFLOPS 9361.41 / 9360.12 / 9358.65 GFLOPS 9361.92 / 9360.60 / 9359.13 GFLOPS 9342.27 / 9340.96 / 9339.49 GFLOPS 9310.85 / 9309.59 / 9308.18 GFLOPS 9310.07 / 9308.81 / 9307.34 GFLOPS 9310.45 / 9309.15 / 9307.62 GFLOPS 9310.14 / 9308.87 / 9307.40 GFLOPS 9310.52 / 9309.23 / 9307.74 GFLOPS 9310.50 / 9309.20 / 9307.70 GFLOPS 9310.21 / 9308.92 / 9307.39 GFLOPS 9309.94 / 9308.67 / 9307.17 GFLOPS 9309.05 / 9307.80 / 9306.35 GFLOPS 9310.60 / 9309.33 / 9307.87 GFLOPS 9309.92 / 9308.65 / 9307.22 GFLOPS 9310.48 / 9309.20 / 9307.78 GFLOPS 9310.28 / 9308.99 / 9307.54 GFLOPS 9310.74 / 9309.46 / 9308.01 GFLOPS 9290.50 / 9289.25 / 9287.98 GFLOPS ... 
9177.33 / 9176.24 / 9175.48 GFLOPS 9176.52 / 9175.43 / 9174.68 GFLOPS 9176.80 / 9175.72 / 9174.97 GFLOPS 9177.21 / 9176.13 / 9175.37 GFLOPS 9175.25 / 9174.17 / 9173.41 GFLOPS 9176.36 / 9175.26 / 9174.50 GFLOPS 9177.45 / 9176.36 / 9175.60 GFLOPS 9177.45 / 9176.37 / 9175.62 GFLOPS 9177.29 / 9176.18 / 9175.42 GFLOPS 9177.55 / 9176.46 / 9175.71 GFLOPS 9176.14 / 9175.05 / 9174.31 GFLOPS 9176.54 / 9175.47 / 9174.71 GFLOPS 9177.17 / 9176.09 / 9175.33 GFLOPS 9176.00 / 9174.93 / 9174.17 GFLOPS 9176.30 / 9175.21 / 9174.46 GFLOPS 9175.58 / 9174.49 / 9173.72 GFLOPS 9176.64 / 9175.57 / 9174.81 GFLOPS 9176.32 / 9175.23 / 9174.48 GFLOPS 9175.38 / 9174.29 / 9173.51 GFLOPS 9176.35 / 9175.29 / 9174.52 GFLOPS 9176.89 / 9175.81 / 9175.06 GFLOPS 9175.53 / 9174.44 / 9173.64 GFLOPS 9175.92 / 9174.83 / 9174.04 GFLOPS 9174.73 / 9173.66 / 9172.89 GFLOPS 9162.75 / 9161.66 / 9160.90 GFLOPS 9117.35 / 9116.28 / 9115.52 GFLOPS 9116.27 / 9115.19 / 9114.45 GFLOPS 9116.67 / 9115.58 / 9114.84 GFLOPS 9116.94 / 9115.83 / 9115.00 GFLOPS 9116.89 / 9115.81 / 9115.01 GFLOPS 9118.10 / 9117.02 / 9116.27 GFLOPS 9117.04 / 9115.97 / 9115.22 GFLOPS 9116.26 / 9115.19 / 9114.42 GFLOPS 9116.97 / 9115.91 / 9115.16 GFLOPS 9116.97 / 9115.88 / 9115.11 GFLOPS 9116.28 / 9115.20 / 9114.43 GFLOPS 9117.29 / 9116.23 / 9115.49 GFLOPS 9116.72 / 9115.65 / 9114.90 GFLOPS 9115.79 / 9114.71 / 9113.96 GFLOPS 9117.05 / 9115.97 / 9115.22 GFLOPS ... */
21,215
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <cstdlib>
#include <iostream>

void displayMatrix(int* A, size_t M, size_t N);

// Transposes the M x N row-major matrix A into the N x M matrix B.
// Expects a 2D grid of 2D blocks covering the whole matrix (rounded up).
// BUG FIX: the original kernel computed an input index and then did nothing,
// so B was never written.
__global__ void transposeKernel(int* A, int* B, int M, int N)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;  // column in A
    int row = blockDim.y * blockIdx.y + threadIdx.y;  // row in A
    // Bounds guard: edge blocks contain threads past the matrix.
    if (row < M && col < N) {
        B[col * M + row] = A[row * N + col];
    }
}

// Transposes h_A (M x N) into h_B on the CPU (timed), then repeats the
// transpose on the GPU (timed) and copies the result back into h_B.
// Returns the first CUDA error encountered, or cudaSuccess.
cudaError_t transposeHost(int* h_A, int* h_B, int M, int N)
{
    cudaError_t status = cudaSuccess;
    cudaEvent_t start, finish;
    cudaEventCreate(&start);
    cudaEventCreate(&finish);

    size_t size = M * N * sizeof(int);
    float msecs = 0;
    int* d_A = NULL;
    int* d_B = NULL;

    // 32 x 32 = 1024 threads per block — the per-block hardware maximum.
    // BUG FIX: the original used dim3(1024, 1024) blocks (over a million
    // threads per block, an invalid launch) and an N/1024 grid that
    // truncates to zero blocks for N < 1024.
    const int TILE = 32;
    dim3 Dim3Blocks(TILE, TILE);
    dim3 Dim3Grids((N + TILE - 1) / TILE, (M + TILE - 1) / TILE);

    // ---- CPU reference transpose ----
    // Walks A column-by-column (stride N) writing B sequentially.
    int i = 0, k = 0;
    auto begin = std::chrono::high_resolution_clock::now();
    while (i < M * N) {
        for (int j = k; j < M * N; j += N) {
            h_B[i++] = h_A[j];
        }
        k++;
    }
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double, std::milli> cputime = end - begin;
    std::cout << "CPU Elapsed Time: " << cputime.count() << " ms" << std::endl;
    std::cout << "\n******* CPU *********\n";
    displayMatrix(h_A, M, N);
    displayMatrix(h_B, N, M);
    std::cout << "\n******* CPU *********\n\n";

    // ---- GPU transpose ----
    status = cudaMalloc((void**)&d_A, size);
    if (status != cudaSuccess) { std::cerr << "cudaMalloc failed for d_A!\n"; goto Error; }
    status = cudaMalloc((void**)&d_B, size);
    if (status != cudaSuccess) { std::cerr << "cudaMalloc failed for d_B!\n"; goto Error; }
    status = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (status != cudaSuccess) { std::cerr << "cudaMemcpy failed for h_A to d_A.\n"; goto Error; }

    cudaEventRecord(start);
    transposeKernel<<<Dim3Grids, Dim3Blocks>>>(d_A, d_B, M, N);
    cudaEventRecord(finish);
    // cudaEventElapsedTime requires the stop event to have completed.
    cudaEventSynchronize(finish);
    cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
    cudaEventElapsedTime(&msecs, start, finish);
    std::cout << "GPU(CUDA) Elapsed Time: " << msecs << "ms\n";
    displayMatrix(h_B, N, M);

Error:
    cudaFree(d_A);
    cudaFree(d_B);
    cudaEventDestroy(start);
    cudaEventDestroy(finish);
    return status;
}

// Prints the M x N matrix A, one row per line.
void displayMatrix(int* A, size_t M, size_t N)
{
    for (size_t i = 0; i < M * N; i++) {
        if (i % N == 0) std::cout << "\n";
        std::cout << A[i] << " ";
    }
    std::cout << "\n";
}

// Usage: prog M N — fills an M x N matrix with 1..M*N and transposes it.
int main(int argc, char** argv)
{
    if (argc == 3) {
        int M = atoi(argv[1]);
        int N = atoi(argv[2]);
        std::cout << "M = " << M << ", N = " << N << "\n";
        size_t size = M * N * sizeof(int);
        int* h_A = (int*)malloc(size);
        if (h_A == NULL) {
            std::cerr << "Failed allocating memory for h_A!";
            return 1;
        }
        int* h_B = (int*)malloc(size);
        if (h_B == NULL) {
            std::cerr << "Failed allocating memory for h_B!";
            return 3;
        }
        for (int i = 0; i < M * N; i++) {
            // h_A[i] = rand() % 100;
            h_A[i] = i + 1;
        }
        cudaError_t status = transposeHost(h_A, h_B, M, N);
        if (status != cudaSuccess) {
            std::cerr << "transposeHost failed!\n";
            return 1;
        }
        free(h_A);
        free(h_B);
        return 0;
    }
    // Robustness: report misuse instead of silently doing nothing.
    std::cerr << "usage: transpose M N\n";
    return 2;
}
21,216
/*
 * dijkstras-test.cu
 *
 *  Created on: Apr 20, 2015
 *      Author: luke
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <climits>
#include <ctime>

void CudaMallocErrorCheck(void** ptr, int size);
void DijkstrasSetupCuda(int *V, int *E, int *We, int *sigma, int *F, int *U, int num_v, int num_e);
void Extremas(int *V, int *E, int num_v, int num_e, int *extrema_vertex, int source_vertex);
void Initialize(int *V, int *E, int num_v, int num_e, int **dev_V, int **dev_E, int **dev_U, int **dev_F, int **dev_sigma, int source);
int Minimum(int *U, int *sigma, int *V, int *E, int num_v, int num_e, int *dev_dest, int *dev_src);
__global__ void InitializeGPU(int *V, int *E, int *U, int *F, int *sigma, int src, int size_v, int size_e);
__global__ void Relax(int *U, int *F, int *sigma, int *V, int *E, int num_v, int num_e);
__global__ void Update(int *U, int *F, int *sigma, int delta, int size);
__global__ void reduce(int *g_idata, int *g_odata, unsigned int n, int *U, int *sigma);
__global__ void reduce_fix(int *g_idata, int *g_odata, unsigned int n, unsigned int s_size, unsigned int loops, int *U, int *sigma);
uint32_t NearestPowerTwo(uint32_t N);
uint32_t NearestPowerBase(uint32_t N, uint32_t base, uint32_t &power);

// Driver: builds a small fixed CSR test graph and runs the GPU shortest-path
// loop from vertex 0. All edge weights are 1 (Relax adds 1 per hop), so this
// behaves like BFS distances on this test input.
int main(int argc, char **argv) {
  // CSR: V[i] is the offset into E of vertex i's adjacency list.
  int V[] = {0, 1, 5, 7, 9};
  int E[] = {1, 0, 2, 3, 4, 1, 4, 1, 4, 1, 2, 3};
  int Sv[] = {0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4};  // edge sources (currently unused)
  int Ev[] = {1, 0, 2, 3, 4, 1, 4, 1, 4, 1, 2, 3};  // edge targets (currently unused)
  int We[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  // Unsettled / Frontier flags and tentative distances; -1 stands for inf.
  int sigma[]= {0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
  int F[] = {1, 0, 0, 0, 0};
  int U[] = {0, 1, 1, 1, 1};

  DijkstrasSetupCuda(V, E, We, sigma, F, U, 5, 12);
}

// Entry wrapper for the CUDA setup; currently just runs Extremas from vertex 0.
void DijkstrasSetupCuda(int *V, int *E, int *We, int *sigma, int *F, int *U,
                        int num_v, int num_e) {
  int extrema_vertex;
  Extremas(V, E, num_v, num_e, &extrema_vertex, 0);
}

// Runs the Relax / Minimum / Update loop until no unsettled vertex remains
// (Minimum returns INT_MAX), times it with CUDA events, and prints the
// resulting distance array. The <<<1, 5>>> launches are sized for the
// 5-vertex test graph in main().
void Extremas(int *V, int *E, int num_v, int num_e, int *extrema_vertex,
              int source_vertex) {
  int *dev_U, *dev_sigma, *dev_F, *dev_V, *dev_E, *dev_src, *dev_dest;
  int delta = 0;
  float elapsedTime = 0;

  // Scratch buffers for the two-stage minimum reduction.
  CudaMallocErrorCheck((void**)&dev_src, num_v*sizeof(int));
  CudaMallocErrorCheck((void**)&dev_dest, num_v*sizeof(int));

  Initialize(V, E, num_v, num_e, &dev_V, &dev_E, &dev_U, &dev_F, &dev_sigma,
             source_vertex);

  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start, 0);
  while (delta != INT_MAX) {
    Relax<<<1, 5>>>(dev_U, dev_F, dev_sigma, dev_V, dev_E, num_v, num_e);
    delta = Minimum(dev_U, dev_sigma, dev_V, dev_E, num_v, num_e, dev_dest, dev_src);
    Update<<<1, 5>>>(dev_U, dev_F, dev_sigma, delta, num_v);
  }
  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);
  cudaEventElapsedTime(&elapsedTime, start, end);
  printf("Elapsed Time: %f\n", elapsedTime);
  cudaEventDestroy(start);
  cudaEventDestroy(end);

  int sigma[num_v];  // VLA — a GCC/nvcc extension, fine for these test sizes
  cudaMemcpy(sigma, dev_sigma, num_v*sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < num_v; ++i) {
    printf("Sigma[%d] : %d\n", i, sigma[i]);
  }

  // Release device memory (the original leaked all of these allocations).
  cudaFree(dev_src);
  cudaFree(dev_dest);
  cudaFree(dev_V);
  cudaFree(dev_E);
  cudaFree(dev_U);
  cudaFree(dev_F);
  cudaFree(dev_sigma);
}

// Allocates the device-side graph and state arrays, copies the CSR graph
// over, and initializes U/F/sigma on the GPU.
void Initialize(int *V, int *E, int num_v, int num_e, int **dev_V, int **dev_E,
                int **dev_U, int **dev_F, int **dev_sigma, int source) {
  CudaMallocErrorCheck((void**)dev_V, num_v*sizeof(int));
  CudaMallocErrorCheck((void**)dev_E, num_e*sizeof(int));
  CudaMallocErrorCheck((void**)dev_U, num_v*sizeof(int));
  CudaMallocErrorCheck((void**)dev_F, num_v*sizeof(int));
  CudaMallocErrorCheck((void**)dev_sigma, num_v*sizeof(int));

  cudaMemcpy(*dev_V, V, num_v*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(*dev_E, E, num_e*sizeof(int), cudaMemcpyHostToDevice);

  int threads_per_block, blocks_per_dim;
  blocks_per_dim = num_v / 1024 + 1;
  // Ceiling division so blocks * threads always covers num_v (the original
  // plain division could launch too few threads, e.g. for num_v == 1025).
  threads_per_block = (num_v + blocks_per_dim - 1) / blocks_per_dim;
  // BUG FIX: the original passed (source, num_e, num_v), swapping the
  // size_v/size_e parameters and making InitializeGPU write U/F/sigma out
  // of bounds (num_e > num_v).
  InitializeGPU<<<blocks_per_dim, threads_per_block>>>(*dev_V, *dev_E, *dev_U,
      *dev_F, *dev_sigma, source, num_v, num_e);
}

// One thread per vertex: source starts settled/frontier with distance 0,
// every other vertex starts unsettled with "infinite" distance.
__global__ void InitializeGPU(int *V, int *E, int *U, int *F, int *sigma,
                              int src, int size_v, int size_e) {
  int offset = blockDim.x * blockIdx.x + threadIdx.x;
  int U_t, F_t, sigma_t;
  if (offset < size_v) {
    U_t = 1;
    F_t = 0;
    sigma_t = INT_MAX - 1;  // -1 leaves headroom for sigma + 1 in Relax
    if (offset == src) {
      U_t = 0;
      F_t = 1;
      sigma_t = 0;
    }
    U[offset] = U_t;
    F[offset] = F_t;
    sigma[offset] = sigma_t;
  }
}

// One thread per vertex: frontier vertices relax all their outgoing edges,
// lowering unsettled neighbors' distances atomically (unit edge weight).
__global__ void Relax(int *U, int *F, int *sigma, int *V, int *E, int num_v,
                      int num_e) {
  int offset = blockDim.x * blockIdx.x + threadIdx.x;
  if (offset < num_v) {
    if (F[offset] == 1) {
      for (int i = V[offset]; i < V[offset+1] && i < num_e; ++i) {
        if (U[E[i]] == 1) {
          atomicMin(&sigma[E[i]], sigma[offset] + 1);
        }
      }
    }
  }
}

// One thread per vertex: settle every unsettled vertex whose tentative
// distance is within delta; those become the next frontier.
__global__ void Update(int *U, int *F, int *sigma, int delta, int size) {
  int offset = blockDim.x * blockIdx.x + threadIdx.x;
  if (offset < size) {
    F[offset] = 0;
    if (U[offset] == 1 && sigma[offset] <= delta) {
      U[offset] = 0;
      F[offset] = 1;
    }
  }
}

// Returns the minimum tentative distance among unsettled vertices
// (INT_MAX when none remain), via a two-stage device reduction.
int Minimum(int *U, int *sigma, int *V, int *E, int num_v, int num_e,
            int *dev_dest, int *dev_src) {
  uint32_t blocks = (num_v+1) / 1024 + 1;
  uint32_t threads = (num_v+1) / blocks / 2;
  uint32_t loops;
  uint32_t n_multiple = NearestPowerBase(num_v, threads * blocks * 2, loops);
  uint32_t dev_dest_size = NearestPowerTwo(blocks*loops);
  uint32_t share = NearestPowerTwo(threads);

  // First pass: per-block minima of unsettled vertices' distances.
  // BUG FIX: the original reduced over V (the CSR offset array) instead of
  // sigma; note both reduce kernels take sigma and the U mask as parameters.
  reduce_fix<<<blocks, threads, share*sizeof(int)>>>(sigma, dev_dest,
      n_multiple, share, loops, U, sigma);

  // Subsequent passes fold the (power-of-two sized) partial results down to
  // a single value, ping-ponging between dev_dest and dev_src.
  if (dev_dest_size > 1024) {
    threads = 512;
    blocks = dev_dest_size / threads / 2;
  } else {
    threads = dev_dest_size / 2;
    blocks = 1;
  }
  while (dev_dest_size > 1) {
    int * temp = dev_dest;
    dev_dest = dev_src;
    dev_src = temp;
    reduce<<<blocks, threads, threads*sizeof(int)>>>(dev_src, dev_dest,
        dev_dest_size, U, sigma);
    dev_dest_size = blocks;
    if (dev_dest_size > 1024) {
      threads = 512;
      blocks = dev_dest_size / threads / 2;
    } else {
      threads = dev_dest_size / 2;
      blocks = 1;
    }
  }
  int result;
  cudaMemcpy(&result, dev_dest, sizeof(int), cudaMemcpyDeviceToHost);
  return result;
}

// cudaMalloc wrapper that aborts with a message on failure.
void CudaMallocErrorCheck(void** ptr, int size) {
  cudaError_t err = cudaMalloc(ptr, size);
  if (err != cudaSuccess) {
    printf("Error: %s", cudaGetErrorString(err));
    exit(1);
  }
}

// Smallest power of two >= N.
uint32_t NearestPowerTwo(uint32_t N) {
  uint32_t result = 1;
  while (result < N) {
    result <<= 1;
  }
  return result;
}

// Smallest multiple of `base` >= N; `power` receives the multiplier.
uint32_t NearestPowerBase(uint32_t N, uint32_t base, uint32_t &power) {
  uint32_t result = base;
  power = 1;
  while (result < N) {
    result += base;
    power++;
  }
  return result;
}

// Standard shared-memory min-reduction over g_idata masked by U == 1
// (settled entries contribute INT_MAX). Assumes blockDim.x is a power of two
// and n elements are reduced two-per-thread.
__global__ void reduce(int *g_idata, int *g_odata, unsigned int n, int *U,
                       int *sigma) {
  extern __shared__ int share_mem[];
  unsigned int thread_id = threadIdx.x;
  unsigned int block_id = blockIdx.x;
  unsigned int block_dim = blockDim.x;
  unsigned int offset = block_id*block_dim*2 + thread_id;

  int result = (offset < n && U[offset] == 1) ? g_idata[offset] : INT_MAX;
  if (offset + block_dim < n && U[offset + block_dim] == 1)
    result = min(result, g_idata[offset+block_dim]);
  share_mem[thread_id] = result;
  __syncthreads();

  for (unsigned int s=block_dim/2; s>0; s>>=1) {
    if (thread_id < s) {
      share_mem[thread_id] = result = min(result, share_mem[thread_id + s]);
    }
    __syncthreads();
  }
  if (thread_id == 0) g_odata[block_id] = result;
}

// Like reduce(), but handles a non-power-of-two blockDim.x by padding shared
// memory up to s_size (a power of two) with INT_MAX, and covers n elements
// in `loops` grid-sized strides, emitting one partial per block per loop.
__global__ void reduce_fix(int *g_idata, int *g_odata, unsigned int n,
                           unsigned int s_size, unsigned int loops, int *U,
                           int *sigma) {
  extern __shared__ int share_mem[];
  unsigned int thread_id = threadIdx.x;
  for (int i = 0; i < loops; ++i) {
    unsigned int offset = blockIdx.x*blockDim.x*2 + threadIdx.x
                        + blockDim.x * 2 * gridDim.x * i;
    int result = (offset < n && U[offset] == 1) ? g_idata[offset] : INT_MAX;
    if (offset + blockDim.x < n && U[offset + blockDim.x] == 1)
      result = min(result, g_idata[offset+blockDim.x]);
    share_mem[thread_id] = result;

    // Pad the shared buffer out to the power-of-two size with identity.
    int delta = s_size - blockDim.x;
    if (thread_id + delta > blockDim.x-1) {
      share_mem[thread_id+delta] = INT_MAX;
    }
    __syncthreads();

    for (unsigned int s=s_size/2; s>0; s>>=1) {
      if (thread_id < s) {
        share_mem[thread_id] = result = min(result, share_mem[thread_id + s]);
      }
      __syncthreads();
    }
    if (thread_id == 0) {
      g_odata[blockIdx.x + gridDim.x*i] = result;
    }
  }
}
21,217
//#define DEBUG
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>

#ifdef DEBUG
cudaError_t status;
// Reports the last CUDA error to stderr; exits on failure.
void checkCuda(cudaError_t& status) {
    status = cudaGetLastError();
    if (status == cudaSuccess) {
        fprintf(stderr, "Success!\n");
    } else {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(status));
        exit(-1);
    }
}
#endif

// Sieve of Eratosthenes crossing-off kernel.
// candidates[i] represents the integer i+2; each thread (grid-stride loop)
// takes one multiplier and clears its multiples starting at multiplier^2.
__global__ void CUDACross(bool *candidates, int size){
    for (int idx = blockIdx.x*blockDim.x + threadIdx.x;
         idx < size/2 + 1;
         idx += blockDim.x * gridDim.x) {
        int multiplier = idx + 2;
        // BUG FIX: multiplier * multiplier overflows 32-bit int once
        // multiplier reaches 46341 (N around 92678) — the original's own
        // comment admitted this. Do the arithmetic in 64 bits.
        long long check = (long long)multiplier * multiplier;
        while (check < size + 2){
            candidates[check - 2] = false;
            check += multiplier;
        }
    }
}

// Marks every entry of the candidate array as (potentially) prime.
void init(bool *candidates, int size){
    for (int i = 0; i < size; i++)
        candidates[i] = true;
}

// Usage: prog N — writes the primes in [2, N] to "<N>.txt".
int main(int argc, char* argv[])
{
    // Robustness: the original dereferenced argv[1] unconditionally.
    if (argc != 2 || atoi(argv[1]) < 2) {
        fprintf(stderr, "bad input\nusage: $ ./seqgenprimes N\nwhere N >= 2\n");
        exit(-1);
    }
    int N = atoi(argv[1]);
    int size = N - 1;               // candidates[0..size-1] cover 2..N
    bool* candidates = new bool[size];
    init(candidates, size);

    int deviceNum = 0;
    cudaSetDevice(deviceNum);
    struct cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, deviceNum);
    int dimBlock = prop.maxThreadsPerBlock / 4;
    int dimGrid = prop.multiProcessorCount * 32;
#ifdef DEBUG
    fprintf(stderr, "maxThreadsPerBlock is %d\n", prop.maxThreadsPerBlock);
    fprintf(stderr, "maxThreadsPerMultiProcessor is %d\n", prop.maxThreadsPerMultiProcessor);
    fprintf(stderr, "totalGlobalMem is %zu\n", prop.totalGlobalMem);
#endif

    bool *gpudata;
    cudaMalloc((void**)&gpudata, sizeof(bool)*size);
#ifdef DEBUG
    fprintf(stderr, "checking cudaMalloc()...\n");
    checkCuda(status);
#endif
    cudaMemcpy(gpudata, candidates, sizeof(bool)*size, cudaMemcpyHostToDevice);
#ifdef DEBUG
    fprintf(stderr, "checking cudaMemcpy() host to device...\n");
    checkCuda(status);
#endif

    CUDACross<<<dimGrid, dimBlock>>>(gpudata, size);
#ifdef DEBUG
    fprintf(stderr, "checking kernel...\n");
    checkCuda(status);
#endif

    cudaMemcpy(candidates, gpudata, sizeof(bool)*size, cudaMemcpyDeviceToHost);
#ifdef DEBUG
    fprintf(stderr, "checking cudaMemcpy() device to host...\n");
    checkCuda(status);
#endif
    cudaFree(gpudata);

    char filename[20];
    sprintf(filename, "%d.txt", N);
    FILE *fp = fopen(filename, "w");
    fprintf(fp, "%d ", 2);
#ifdef DEBUG
    fprintf(stderr, "%d ", 2);
#endif
    for (int i = 1; i < size; ++i) {
        if (candidates[i]) fprintf(fp, "%d ", i+2);
#ifdef DEBUG
        if (candidates[i]) fprintf(stderr, "%d ", i+2);
#endif
    }
    // Resource fixes: the original leaked both the file handle and the array.
    fclose(fp);
    delete[] candidates;
    return 0;
}
21,218
// put a kernel here!
21,219
#include <cstdio> int main() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); printf("Device name: %s\n", deviceProp.name); printf("Total global memory: %ld\n", deviceProp.totalGlobalMem); printf("Shared memory per block: %ld\n", deviceProp.sharedMemPerBlock); printf("Registers per block: %ld\n", deviceProp.regsPerBlock); printf("Warp size: %ld\n", deviceProp.warpSize); printf("Memory pitch: %ld\n", deviceProp.memPitch); printf("Max threads per block: %ld\n", deviceProp.maxThreadsPerBlock); printf("Max threads dimensions: x = %ld, y = %ld, z = %ld\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf("Max grid size: x = %ld, y = %ld, z = %ld\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf("Clock rate: %ld\n", deviceProp.clockRate); printf("Total constant memory: %ld\n", deviceProp.totalConstMem); printf("Compute capability: %ld.%ld\n", deviceProp.major, deviceProp.minor); printf("Texture alignment: %ld\n", deviceProp.textureAlignment); printf("Device overlap: %ld\n", deviceProp.deviceOverlap); printf("Multiprocessor count: %ld\n", deviceProp.multiProcessorCount); printf("Kernel execution timeout enabled: %s\n", deviceProp.kernelExecTimeoutEnabled ? "true" : "false"); scanf(""); }
21,220
#include "includes.h" __global__ void transpose(double *in_d, double * out_d, int row, int col) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; out_d[y+col*x] = in_d[x+row*y]; }
21,221
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <malloc.h>
#include <string.h>  // strncmp (the original used it without this header)

// Holds one grayscale PGM image.
typedef struct image{
    char *data;
    int cols;
    int rows;
    int depth;
} image;

// GPU kernel: each thread inverts one full row of the image (255 - pixel).
// BUG FIX: the original computed r = blockIdx.x + threadIdx.x, which both
// duplicates and skips rows; the correct flat index is
// blockIdx.x*blockDim.x + threadIdx.x, and it must be bounded by nRows.
__global__ void negativo(char *input_image, char *output_image, int nRows, int nCols){
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int i;
    if (r < nRows){
        for (i = 0; i < nCols; i++){
            output_image[nCols*r + i] = 255 - input_image[nCols*r + i];
        }
    }
}

// Helper functions
int read_pgm(char *input_name, image *img);
int write_pgm(char *output_name, image *img);

// Usage: prog input.pgm output.pgm — writes the photographic negative.
int main(int argc, char **argv){
    image lena;

    // Robustness: the original dereferenced argv[1]/argv[2] unconditionally.
    if (argc < 3){
        printf("Uso: %s entrada.pgm salida.pgm\n", argv[0]);
        return 1;
    }

    // Read input image
    read_pgm(argv[1], &lena);
    char *pt = lena.data;
    int ndata = lena.rows*lena.cols;

    // CUDA path
    char *device_input_image = NULL;
    char *device_output_image = NULL;
    cudaMalloc((void **) &device_input_image , ndata*sizeof(char));
    cudaMalloc((void **) &device_output_image , ndata*sizeof(char));
    cudaMemcpy(device_input_image, pt, ndata*sizeof(char), cudaMemcpyHostToDevice);

    // One thread per row. BUG FIX: rows/512 truncates to zero blocks for
    // images shorter than 512 rows; use ceiling division.
    int nThreads = 512;
    int nBlocks = (lena.rows + nThreads - 1) / nThreads;
    negativo<<<nBlocks, nThreads>>>(device_input_image, device_output_image, lena.rows, lena.cols);

    cudaMemcpy(pt, device_output_image, ndata*sizeof(char), cudaMemcpyDeviceToHost);
    cudaFree(device_input_image);   // was leaked
    cudaFree(device_output_image);  // was leaked

    // Write output image
    write_pgm(argv[2], &lena);
    free(lena.data);
    return 0;
}

// Reads a binary (P5) PGM file into *img. Exits on open/format errors.
int read_pgm(char *input_name, image *img){
    FILE *input_fd = fopen(input_name, "r+");
    if(input_fd == NULL) {
        printf("Error al abrir el archivo : %s\n", input_name);
        exit(1);
    }
    char row[256];
    fscanf(input_fd, "%s\n", row);
    if (strncmp(row, "P5", 2) != 0){
        printf("El archivo no es PGM\n");
        exit(1);
    }
    // BUG FIX: the original used 3-byte buffers, which overflow for any
    // dimension or depth of 3+ digits (e.g. "512", "255").
    char s_rows[16], s_cols[16], s_depth[16];
    int rows, cols, depth;
    fscanf(input_fd,"%s\n",row);
    fgets(row, 256, input_fd);      // skip comment line
    fscanf(input_fd,"%s\n", s_cols);
    fscanf(input_fd,"%s\n", s_rows);
    fscanf(input_fd,"%s\n", s_depth);
    rows = atoi(s_rows);
    cols = atoi(s_cols);
    depth = atoi(s_depth);
    img->data = (char*) malloc(rows*cols);
    img->cols = cols;
    img->rows = rows;
    img->depth = depth;
    fread(img->data, sizeof(char), rows*cols, input_fd);
    fclose(input_fd);
    return 1;
}

// Writes *img as a binary (P5) PGM file.
int write_pgm(char *output_name, image *img){
    FILE *output_fd;
    int ndata = img->rows*img->cols;
    output_fd = fopen(output_name, "w");
    fprintf(output_fd, "%s\n", "P5");
    fprintf(output_fd, "#\n");
    // BUG FIX: the PGM header is "width height" = cols rows; the original
    // wrote rows first, corrupting non-square images. read_pgm above reads
    // cols first, consistent with the spec.
    fprintf(output_fd, "%i %i\n", img->cols, img->rows);
    fprintf(output_fd, "%i\n", img->depth);
    fwrite(img->data, sizeof(char), ndata, output_fd);
    fclose(output_fd);
    return 1;
}
21,222
#include "includes.h" __global__ void sqrt_kernel_large(float* x, unsigned int len, unsigned int rowsz) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * rowsz; if (idx < len) x[idx] = sqrt(x[idx]); }
21,223
#include "includes.h" int row = 0; int col = 0; using namespace std; __global__ __global__ void gpu_transpose(float *dst, float *A, int col, int row) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx<col){ for (int j=0; j<row; j++){ dst[j*col+idx] = A[idx*row+j]; } } }
21,224
#include <iostream> #include <cuda_runtime.h> using namespace std; int get_GPU_Rate() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp,0); return deviceProp.clockRate; } __global__ void Xor(int a,int b,int *result_device,clock_t* time){ clock_t start = clock(); int c; *result_device+=a^b; *time = clock() - start; } int main(){ int *result_device; cudaMalloc((void**) &result_device, sizeof(int)); clock_t* time; cudaMalloc((void**) &time, sizeof(clock_t)); cudaEvent_t start_device, stop_device; float time_device; cudaEventCreate(&start_device); cudaEventCreate(&stop_device); cudaEventRecord( start_device, 0 ); Xor<<<1,1>>>(1,-1,result_device,time); cudaEventRecord( stop_device, 0 ); cudaEventSynchronize( stop_device ); cudaEventElapsedTime( &time_device, start_device, stop_device ); cudaEventDestroy( start_device ); cudaEventDestroy( stop_device ); cout<<"gputime="<<time_device<<"ms"<<endl; clock_t time_used; cudaMemcpy(&time_used, time, sizeof(clock_t),cudaMemcpyDeviceToHost); cout<<"time="<<time_used<<endl; int result_host; cudaMemcpy(&result_host, result_device, sizeof(int),cudaMemcpyDeviceToHost); cudaFree(result_device); cout<<result_host<<endl; return 0; }
21,225
#include "FeedForward.cuh"

// The feedforward algorithm propagates the inputs given forward.
// Because these are dependent on the layer before it, the threads must be called
// layer by layer. Furthermore, this can only be parallelized with one thread per
// output because it is an nx1 input and has a race condition in that the
// weights/biases can overwrite one another if called to update the same neuron
// in parallel.

/**
 * The kernel for the feedforward algorithm.
 * Grid-stride loop over the output neurons of `layer`; each thread owns one
 * output neuron, so no two threads write the same (*outputs)[layer] slot.
 * NOTE(review): nn and outputs are presumably cudaMallocManaged (see
 * makeExpected below) so they are dereferenceable on the device — confirm
 * for nn, which is allocated elsewhere.
 * Parameter nn: the neural network for the feedforward algorithm
 * Parameter layer: the layer that is being evaluated (must be >= 1, since
 *                  layer-1 is read for weights/biases/previous outputs)
 * Parameter outputs: the output matrix, indexed [layer][neuron]
 * Return: nothing
 */
__global__ void feedForwardKernel(NeuralNet* nn, int layer, double*** outputs){
    // Loops through the output neurons (grid-stride)
    for(int neuron2 = threadIdx.x + (blockIdx.x * blockDim.x);
        neuron2 < nn->neurons[layer];
        neuron2 += blockDim.x*gridDim.x){
        // Sets the initial output to 0
        (*outputs)[layer][neuron2] = 0;
        // Loops through the input neurons and multiplies the weights * the inputs
        for(int neuron1 = 0; neuron1 < nn->neurons[layer-1]; neuron1++){
            (*outputs)[layer][neuron2] += (nn->weights[layer-1][neuron1][neuron2]
                                           * (*outputs)[layer-1][neuron1]);
        }
        // Adds the bias
        (*outputs)[layer][neuron2]+=nn->biases[layer-1][neuron2];
        // Applies this neuron's activation function to the weighted sum
        switch(nn->activations[layer-1][neuron2]){
            case BINARY_STEP:
                // 0 for non-positive input, 1 otherwise
                if((*outputs)[layer][neuron2] <=0){
                    (*outputs)[layer][neuron2] = 0;
                }
                else{
                    (*outputs)[layer][neuron2] = 1;
                }
                break;
            case LOGISTIC:
                // Sigmoid: 1 / (1 + e^-x)
                (*outputs)[layer][neuron2] = 1/(1+exp(-1*(*outputs)[layer][neuron2]));
                break;
            case TANH:
                (*outputs)[layer][neuron2] = tanh((*outputs)[layer][neuron2]);
                break;
            case RELU:
                // max(0, x)
                if((*outputs)[layer][neuron2] < 0){
                    (*outputs)[layer][neuron2] = 0;
                }
                break;
            case LEAKYRELU:
                // 0.01 * x for negative inputs
                if((*outputs)[layer][neuron2] < 0){
                    (*outputs)[layer][neuron2] *= .01;
                }
                break;
        }
    }
}

/**
 * Feeds the inputs forward through the neural network.
 * Layer 0 is filled on the host from `inputs`; every subsequent layer is
 * computed by one kernel launch, synchronized before the next layer so the
 * layer-(k-1) outputs are visible (layers are sequentially dependent).
 * Parameter nn: the neural network to feed the inputs through
 * Parameter outputs: a reference to the output matrix
 * Parameter inputs: the array of inputs (length nn->neurons[0] assumed)
 * Returns: nothing
 */
void feedForward(NeuralNet* nn, double*** outputs, double* inputs){
    // Loops through the layers
    for(int layer = 0; layer < nn->layers; layer++){
        // If it is the input layer
        if(layer == 0){
            // Sets the input layer to the inputs
            for(int input = 0; input < nn->neurons[layer]; input++){
                (*outputs)[layer][input] = inputs[input];
            }
        }
        else{
            // Calls the feedforward kernel; NUMBLOCKS/BLOCKSIZE come from
            // the project header.
            feedForwardKernel<<<NUMBLOCKS, BLOCKSIZE>>>(nn, layer, outputs);
            // Wait so the next layer reads completed results (and managed
            // memory is coherent with the host).
            cudaDeviceSynchronize();
        }
    }
}

/**
 * Makes the expected output matrix.
 * Allocates, in managed memory, numOutputs full per-layer output matrices
 * shaped like the network ([output][layer][neuron]).
 * Parameter nn: the neural net to make the output matrix from
 * Parameter numOutputs: the number of outputs to create
 * Return: the output matrix created
 */
double*** makeExpected(NeuralNet* nn, int numOutputs){
    double*** outputs;
    cudaMallocManaged(&outputs, numOutputs * sizeof(double**));
    for(int output = 0; output < numOutputs; output++){
        cudaMallocManaged(&outputs[output], nn->layers * sizeof(double*));
        for(int layer=0; layer < nn->layers; layer++){
            cudaMallocManaged(&outputs[output][layer],
                              nn->neurons[layer] * sizeof(double));
        }
    }
    return outputs;
}

/**
 * Makes the actual outputs.
 * Unlike makeExpected, only the final layer's width is allocated per output
 * ([output][final-layer neuron]).
 * Parameter nn: the neural network to get the outputs for
 * Parameter numOutputs: the number of outputs to get
 * Returns: the matrix representing the actual outputs corresponding to
 *          the structure of the neural network
 */
double** makeActual(NeuralNet* nn, int numOutputs){
    double** outputs;
    cudaMallocManaged(&outputs, numOutputs*sizeof(double*));
    for(int output=0; output<numOutputs; output++){
        cudaMallocManaged(&outputs[output],
                          nn->neurons[nn->layers-1] * sizeof(double));
    }
    return outputs;
}

/**
 * Frees the output matrix (inner-to-outer, matching makeExpected's layout).
 * Parameter nn: the neural network
 * Parameter outputs: the output matrix to free
 * Parameter numOutputs: the number of outputs in the matrix
 * Returns: nothing
 */
void freeOutputs(NeuralNet* nn, double*** outputs, int numOutputs){
    for(int output=0; output < numOutputs; output++){
        for(int layer=0; layer < nn->layers; layer++){
            cudaFree(outputs[output][layer]);
        }
        cudaFree(outputs[output]);
    }
    cudaFree(outputs);
}
21,226
// D2Q9 lattice-Boltzmann kernels.
// Lattice layout: per node, DIRECTIONS (= 9) distribution values indexed by
// z = (i+1)*3 + (j+1) for direction (i, j) in {-1,0,1}^2; nodes are stored
// as [x][y][z] with y-stride DIRECTIONS and x-stride DIRECTIONS*height.
const double LATTICE_SPEED = 0.1;
const double TAU = 0.9;          // relaxation time for the BGK collision step
const int DIRECTIONS = 9;
const int DIMENSIONS = 2;

// One grid block per lattice node: x/y come from blockIdx, width/height from
// gridDim. NOTE(review): the macro ignores its four arguments entirely —
// they exist only for readability at the call sites.
#define KERNEL_HEADER(xvar, yvar, wvar, hvar) \
    const int x = blockIdx.x;\
    const int y = blockIdx.y;\
    const int width = gridDim.x;\
    const int height = gridDim.y

// Streaming step: each node pulls each direction's value from the upstream
// neighbor; out-of-bounds neighbors trigger bounce-back from the node itself.
__global__ void stream(double *out, double *in) {
    KERNEL_HEADER(x, y, width, height);

    int z = 0;
    for(int i = -1; i <= 1; i++) {
        for(int j = -1; j <= 1; j++) {
            const int target = z + y * DIRECTIONS + x * DIRECTIONS * height;

            // Compute source index.
            int xs = x + i;
            int ys = y + j;
            const int source = z + ys * DIRECTIONS + xs * DIRECTIONS * height;

            if(xs >= 0 && xs < width && ys >= 0 && ys < height) {
                out[target] = in[source];
            } else {
                // If the check yielded out of bounds, that means that the
                // node we're computing for is on an edge. Thus, we should
                // use bounce-back -- instead of getting the value from a nearby node,
                // we get the value from itself.
                // (1-i, 1-j) maps direction (i, j) to its opposite's z index.
                const int bounce_z = (1 - i) * 3 + (1 - j);
                const int bounce_src = bounce_z + y * DIRECTIONS + x * DIRECTIONS * height;

                // Damp the reflected value to model wall friction.
                const double friction_loss = 0.9;
                out[target] = friction_loss * in[bounce_src];
            }
            z++;
        }
    }
}

// Macroscopic density: sum of all nine direction values at each node.
// Output is a width x height scalar field stored [x][y].
__global__ void density(double *out, double* in) {
    KERNEL_HEADER(x, y, width, height);

    // Compute target index.
    const int target = y + x * height;

    out[target] = 0;
    int z = 0;
    for(int i = -1; i <= 1; i++) {
        for(int j = -1; j <= 1; j++) {
            // Compute source index.
            const int source = z + y * DIRECTIONS + x * DIRECTIONS * height;
            out[target] += in[source];
            z++;
        }
    }
}

// Macroscopic velocity: momentum (sum of direction values weighted by the
// direction vector) scaled by LATTICE_SPEED / density. Output is a 2-vector
// field stored [x][y][component].
__global__ void velocity(double *out, double* density, double* directions) {
    KERNEL_HEADER(x, y, width, height);

    // Compute target indices.
    const int target_x = 0 + y * DIMENSIONS + x * height * DIMENSIONS;
    const int target_y = 1 + y * DIMENSIONS + x * height * DIMENSIONS;

    out[target_x] = 0;
    out[target_y] = 0;
    int z = 0;
    for(int i = -1; i <= 1; i++) {
        for(int j = -1; j <= 1; j++) {
            const int source = z + y * DIRECTIONS + x * DIRECTIONS * height;
            out[target_x] += directions[source] * i;
            out[target_y] += directions[source] * j;
            z++;
        }
    }

    // Normalize by local density. NOTE(review): divides unguarded — assumes
    // density never reaches zero; confirm for the intended initial state.
    const int target = y + x * height;
    out[target_x] *= LATTICE_SPEED / density[target];
    out[target_y] *= LATTICE_SPEED / density[target];
}

// Equilibrium distribution (standard D2Q9 second-order expansion):
// f_eq = w * rho * (1 + 3(e.u)/c + 4.5(e.u)^2/c^2 - 1.5(u.u)/c^2).
__global__ void equilibrium(double *eq, double* density, double* velocity) {
    KERNEL_HEADER(x, y, width, height);

    const int density_src = y + x * height;
    const int xvel_src = 0 + y * DIMENSIONS + x * height * DIMENSIONS;
    const int yvel_src = 1 + y * DIMENSIONS + x * height * DIMENSIONS;

    // u . u
    double velmag = velocity[xvel_src] * velocity[xvel_src]
                  + velocity[yvel_src] * velocity[yvel_src];

    int z = 0;
    for(int i = -1; i <= 1; i++) {
        for(int j = -1; j <= 1; j++) {
            // Compute the weight (D2Q9: 4/9 rest, 1/9 axis, 1/36 diagonal).
            double weight;
            if(i == 0 && j == 0) {
                weight = 4.0 / 9.0;
            } else if(i == 0 || j == 0) {
                weight = 1.0 / 9.0;
            } else {
                weight = 1.0 / 36.0;
            }

            // e_i . u
            double dotprod = i * velocity[xvel_src] + j * velocity[yvel_src];

            double sum = 1.0;
            sum += 3 / LATTICE_SPEED * dotprod;
            sum += 4.5 / (LATTICE_SPEED * LATTICE_SPEED) * dotprod * dotprod;
            sum -= 1.5 / (LATTICE_SPEED * LATTICE_SPEED) * velmag;

            const int target = z + y * DIRECTIONS + x * DIRECTIONS * height;
            eq[target] = weight * density[density_src] * sum;
            z++;
        }
    }
}

// BGK collision step: relax each direction toward equilibrium with rate 1/TAU.
__global__ void update(double *out, double* equilibrium, double* directions) {
    KERNEL_HEADER(x, y, width, height);

    int z = 0;
    for(int i = -1; i <= 1; i++) {
        for(int j = -1; j <= 1; j++) {
            const int target = z + y * DIRECTIONS + x * DIRECTIONS * height;
            out[target] = directions[target]
                        - (directions[target] - equilibrium[target]) / TAU;
            z++;
        }
    }
}
21,227
#include "includes.h" __global__ void naive_bias_add(float *in, int size, float *bias, int bias_size) { int bid = blockIdx.x * blockDim.x + threadIdx.x; if (!(bid < size)) return; int bias_offset = bid - (bid / bias_size) * bias_size; in[bid] += bias[bias_offset]; }
21,228
#include "includes.h" __global__ void UpdateCC_XY( float *CCXY, int id_CC, float *XY_tofill, int dim_XY ){ int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x; if(id < dim_XY) CCXY[id_CC*dim_XY + id] = XY_tofill[id]; }
21,229
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <random>
#include <cstdint>
#include <iostream>
#include <cstring>
#include <ctime>

#define BLOCK_SIZE 32

void fill_matrix(float* matrix, uint64_t n);
void print_matrix(float* matrix, uint64_t n);
void run_basic(int blocks, int threads, uint64_t n);
void run_better(dim3 blocks, int threads, uint64_t n);
void run_optimized(dim3 blocks, dim3 threads, uint64_t n);

// Multiplies two row-major NxN matrices; one thread per result COLUMN.
// Launch with blocks * threads == n (1D); each thread walks all rows.
// Has 2*N*N*N total floating-point operations.
// NOTE: the result is stored transposed (result[col * n + row]), preserving
// the original behaviour of this benchmark.
__global__ void gpu_basic_mm(float* matrix1, float* matrix2, float* result, uint64_t n)
{
    int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
    for (int row = 0; row < n; ++row) {
        float sum = 0;
        for (int item = 0; item < n; ++item) {
            sum += matrix1[row * n + item] * matrix2[item * n + thread_index];
        }
        result[thread_index * n + row] = sum;
    }
}

// One thread per result element; N*N threads total.
// Grid is (N, A) blocks of B threads with A * B == N: blockIdx.x selects the
// row, (blockIdx.y, threadIdx.x) select the column.
// BUG FIX: the column index previously used blockDim.y -- which is 1 when the
// kernel is launched with a scalar thread count -- so every block computed
// columns in [0, A + B) and most of the result was never written. The block
// offset must be scaled by the block width, blockDim.x.
// NOTE: the result is stored transposed (result[col * n + row]), like
// gpu_basic_mm.
__global__ void gpu_better_mm(float* matrix1, float* matrix2, float* result, uint64_t n)
{
    int row = blockIdx.x;
    int column = blockIdx.y * blockDim.x + threadIdx.x;
    float sum = 0;
    for (int item = 0; item < n; ++item) {
        sum += matrix1[row * n + item] * matrix2[item * n + column];
    }
    result[column * n + row] = sum;
}

// Shared-memory tiled matrix multiply. Requires:
//   blockDim.x == blockDim.y == BLOCK_SIZE and
//   gridDim.x  == gridDim.y  == n / BLOCK_SIZE.
// BUG FIX: the original addressed the matrices as if they were stored as a
// sequence of contiguous BLOCK_SIZE*BLOCK_SIZE tiles, but fill_matrix fills
// them in plain row-major order; tiles must therefore be addressed with a row
// stride of n. The result is written back row-major at its true position.
__global__ void gpu_optimized_mm(float* matrix1, float* matrix2, float* result, int n)
{
    int tile_size = blockDim.x;  // assumes square tiles (== BLOCK_SIZE)

    // Block row and column within the tiling of the result.
    int tile_row = blockIdx.y;
    int tile_column = blockIdx.x;

    // Thread row and column within the tile.
    int row = threadIdx.y;
    int col = threadIdx.x;

    float value = 0;

    for (int i = 0; i < (n / tile_size); ++i) {
        // Top-left corners of the current tiles: a row band of matrix1 and a
        // column band of matrix2, both row-major with row stride n.
        const float *temp1 = &matrix1[n * tile_size * tile_row + tile_size * i];
        const float *temp2 = &matrix2[n * tile_size * i + tile_size * tile_column];

        __shared__ float shared1[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float shared2[BLOCK_SIZE][BLOCK_SIZE];
        shared1[row][col] = temp1[row * n + col];
        shared2[row][col] = temp2[row * n + col];
        __syncthreads();  // both tiles fully loaded before use

        for (int j = 0; j < tile_size; ++j) {
            value += shared1[row][j] * shared2[j][col];
        }
        __syncthreads();  // everyone done reading before the next load
    }

    result[(tile_size * tile_row + row) * n + (tile_size * tile_column + col)] = value;
}

// Prints the last CUDA error, if any. Uses cudaPeekAtLastError so the sticky
// error state is left untouched for later checks.
static void check_cuda(const char* what)
{
    cudaError_t code = cudaPeekAtLastError();
    if (code != cudaSuccess) {
        printf("%s Error: %s\n", what, cudaGetErrorString(code));
    }
}

// Shared benchmark driver for the three run_* entry points: allocates and
// fills the host matrices, copies them to the device, times the kernel issued
// by `launch(g_m1, g_m2, g_result)` (memcpys excluded), copies the result
// back, and prints the operation count / elapsed time / FLOPS estimate.
template <typename LaunchFn>
static void run_benchmark(const char* kernel_name, uint64_t n, LaunchFn launch)
{
    float* m1 = new float[n * n];
    float* m2 = new float[n * n];
    float* result = new float[n * n];

    float* g_m1;
    float* g_m2;
    float* g_result;
    cudaMalloc(reinterpret_cast<void**>(&g_m1), n * n * sizeof(float));
    cudaMalloc(reinterpret_cast<void**>(&g_m2), n * n * sizeof(float));
    cudaMalloc(reinterpret_cast<void**>(&g_result), n * n * sizeof(float));

    fill_matrix(m1, n);
    fill_matrix(m2, n);
    check_cuda("Allocation");

    cudaMemcpy(g_m1, m1, n * n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(g_m2, m2, n * n * sizeof(float), cudaMemcpyHostToDevice);
    check_cuda("Memcpy");

    // Time the kernel only; synchronize so the asynchronous launch is fully
    // captured. (cudaDeviceSynchronize replaces the long-deprecated
    // cudaThreadSynchronize used before.)
    clock_t start = clock();
    launch(g_m1, g_m2, g_result);
    cudaDeviceSynchronize();
    float elapsed_seconds = (float)(clock() - start) / CLOCKS_PER_SEC;
    check_cuda("Kernel");

    cudaMemcpy(result, g_result, n * n * sizeof(float), cudaMemcpyDeviceToHost);
    check_cuda("Memcpy 2");

    float flops = (2 * n * n * n) / elapsed_seconds;
    std::cout << "Operations: " << (2 * n * n * n) << std::endl;
    printf("Seconds: %f\n", elapsed_seconds);
    std::cout << "FLOPS for " << kernel_name << " at size " << n
              << " matrices = " << flops << std::endl;

    cudaFree(g_m1);
    cudaFree(g_m2);
    cudaFree(g_result);
    delete[] m1;
    delete[] m2;
    delete[] result;
}

// Benchmarks gpu_basic_mm; requires blocks * threads == n.
void run_basic(int blocks, int threads, uint64_t n)
{
    run_benchmark("gpu_basic_mm()", n, [&](float* a, float* b, float* c) {
        gpu_basic_mm<<<blocks, threads>>>(a, b, c, n);
    });
}

// Benchmarks gpu_better_mm; requires blocks.x == n and blocks.y * threads == n.
void run_better(dim3 blocks, int threads, uint64_t n)
{
    run_benchmark("gpu_better_mm()", n, [&](float* a, float* b, float* c) {
        gpu_better_mm<<<blocks, threads>>>(a, b, c, n);
    });
}

// Benchmarks gpu_optimized_mm; requires blocks.{x,y} * threads.{x,y} == n and
// threads.x == threads.y == BLOCK_SIZE.
void run_optimized(dim3 blocks, dim3 threads, uint64_t n)
{
    run_benchmark("gpu_optimized_mm()", n, [&](float* a, float* b, float* c) {
        gpu_optimized_mm<<<blocks, threads>>>(a, b, c, n);
    });
}

int main()
{
    std::cout << "================== Basic ==================" << std::endl;
    std::cout << "Block Dimension: " << 32 << ", Thread Dimension: " << 32
              << ", Matrix Size: " << 1024 << std::endl;
    run_basic(32, 32, 1024);
    std::cout << std::endl;

    std::cout << "================== Basic ==================" << std::endl;
    std::cout << "Block Dimension: " << 64 << ", Thread Dimension: " << 64
              << ", Matrix Size: " << 4096 << std::endl;
    // BUG FIX: this run advertised 64x64 threads on 4096x4096 matrices but
    // re-ran the 1024 configuration; the call now matches the banner.
    run_basic(64, 64, 4096);
    std::cout << std::endl;

    std::cout << "================== Better ==================" << std::endl;
    std::cout << "Block Dimension: " << 1024 << "x" << 32
              << ", Thread Dimension: " << 32
              << ", Matrix Size: " << 1024 << std::endl;
    run_better(dim3(1024, 32), 32, 1024);
    std::cout << std::endl;

    std::cout << "================== Better ==================" << std::endl;
    std::cout << "Block Dimension: " << 4096 << "x" << 8
              << ", Thread Dimension: " << 512
              << ", Matrix Size: " << 4096 << std::endl;
    run_better(dim3(4096, 8), 512, 4096);
    std::cout << std::endl;

    std::cout << "================= Optimized =================" << std::endl;
    std::cout << "Block Dimension: " << 32 << "x" << 32
              << ", Thread Dimension: " << 32 << "x" << 32
              << ", Matrix Size: " << 1024 << std::endl;
    run_optimized(dim3(32, 32), dim3(32, 32), 1024);
    std::cout << std::endl;

    std::cout << "================= Optimized =================" << std::endl;
    std::cout << "Block Dimension: " << 128 << "x" << 128
              << ", Thread Dimension: " << 32 << "x" << 32
              << ", Matrix Size: " << 4096 << std::endl;
    run_optimized(dim3(128, 128), dim3(32, 32), 4096);
    std::cout << std::endl;
}

// Fill matrix with random floats from 2 - 100. The engine is default-seeded,
// so every call produces the same deterministic matrix.
void fill_matrix(float* matrix, uint64_t n)
{
    std::uniform_real_distribution<float> distribution(2, 100);
    std::default_random_engine generator;
    for (uint64_t i = 0; i < n; i++) {
        for (uint64_t j = 0; j < n; j++) {
            matrix[i * n + j] = distribution(generator);
        }
    }
}

// Print matrix (for debugging purposes).
void print_matrix(float* matrix, uint64_t n)
{
    for (uint64_t i = 0; i < n; i++) {
        for (uint64_t j = 0; j < n; j++) {
            printf("%f\t", matrix[i * n + j]);
        }
        printf("\n");
    }
}
21,230
//TO BE DONE LATER //#include "Prerequisites.cuh" //#include "CTF.cuh" //#include "FFT.cuh" //#include "Generics.cuh" //#include "Helper.cuh" //#include "Optimization.cuh" //#include "Transformation.cuh" // // //__global__ void LocalMinMax1DKernel(tfloat* d_input, int dim, int extent, tfloat2* d_min, tfloat2* d_max, uint2* d_minmap, uint2* d_maxmap); // // //////////////////////////////////////////////////////////////////////////// ////Find arbitrary number of local peaks and valleys with a defined extent// //////////////////////////////////////////////////////////////////////////// // //void d_LocalMinMax1D(tfloat* d_input, uint dim, uchar extent, tfloat2* &d_min, tfloat2* &d_max, uint* &d_offsetmin, uint* &d_offsetmax, uint batch) //{ // //} // // ////////////////// ////CUDA kernels// ////////////////// // //__device__ inline void Comparator(uint &keyA, uint &keyB) //{ // uint t; // // if (keyA > keyB) // { // t = keyA; // keyA = keyB; // keyB = t; // } //} // //__global__ void LocalMinMax1DKernel(tfloat* d_input, int dim, int extent, tfloat2* d_min, tfloat2* d_max, uint* d_nummin, uint* d_nummax, uint2* d_minmap, uint2* d_maxmap) //{ // __shared__ ushort s_keysmin[1536], s_keysmax[1536]; // __shared__ uint s_nummin, s_nummax; // for (uint i = threadIdx.x; i < 1536; i += blockDim.x) // { // s_keysmin[i] = 65535; // s_keysmax[i] = 65535; // } // if (threadIdx.x == 0) // { // s_nummin = 0; // s_nummax = 0; // } // d_input += dim * blockIdx.x; // d_minmap += blockIdx.x; // d_maxmap += blockIdx.x; // __syncthreads(); // // for (int i = threadIdx.x; i < dim; i += blockDim.x) // { // tfloat refval = d_input[i]; // char ismin = true, ismax = true; // int start = max(0, i - extent), finish = min(dim, i + extent + 1); // for (int w = start; w < finish; w++) // { // tfloat val = d_input[w]; // if (val < refval) // ismin = false; // else if (val > refval) // ismax = false; // } // // if (ismin == ismax) // continue; // // if (ismin) // { // uint oldindex = 
atomicInc(&s_nummin, 1536); // s_keysmin[oldindex] = i; // } // else if (ismax) // { // uint oldindex = atomicInc(&s_nummax, 1536); // s_keysmax[oldindex] = i; // } // } // // //}
21,231
#include <stdio.h>
#include <time.h>
#include <stdlib.h>

// Number of thread blocks used for the kernel launch.
enum { grid_count = 16 };

// Element-wise vector addition: C[i] = A[i] + B[i] for i < dataCount.
// Threads past the end of the arrays (grid padding) exit immediately.
__global__ void vectorAdditionKernel(float * A , float * B , float * C , int dataCount)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < dataCount)
        C[index] = A[index] + B[index];
}

int main()
{
    // const so the host arrays below are plain fixed-size arrays rather than
    // non-standard C++ VLAs.
    const int dataCount = 2100;
    float h_A[dataCount];
    float h_B[dataCount];
    float h_C[dataCount];

    // initialize the input values
    for (int i = 0; i < dataCount; i++) {
        h_A[i] = (float) i;
        h_B[i] = (float) i;
    }

    float * d_A;
    cudaMalloc(&d_A, dataCount * sizeof(float));
    cudaMemcpy(d_A, h_A, sizeof(float) * dataCount, cudaMemcpyHostToDevice);

    float * d_B;
    cudaMalloc(&d_B, dataCount * sizeof(float));
    cudaMemcpy(d_B, h_B, sizeof(float) * dataCount, cudaMemcpyHostToDevice);

    // d_C is output-only: no need to copy the uninitialized h_C to the device
    // (the original did, copying indeterminate host memory for no effect).
    float * d_C;
    cudaMalloc(&d_C, dataCount * sizeof(float));

    // BUG FIX: dataCount / grid_count truncates (2100 / 16 == 131), so the
    // original launch covered only 16 * 131 == 2096 threads and the last four
    // elements of C were never computed. Round up instead; the kernel's
    // bounds check discards the few excess threads.
    int threadPerBlock = (dataCount + grid_count - 1) / grid_count;
    vectorAdditionKernel<<<grid_count, threadPerBlock>>>(d_A, d_B, d_C, dataCount);

    // get the data (cudaMemcpy synchronizes with the kernel)
    cudaMemcpy(h_C, d_C, dataCount * sizeof(float), cudaMemcpyDeviceToHost);

    for (int i = 0; i < dataCount; i++) {
        printf("%f \n", h_C[i]);
    }

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
21,232
/*
 *
 * saxpy.cu
 *
 * Part of the microdemo to illustrate how to initialize the driver API.
 * Compile this into ptx with:
 *
 * Build with: nvcc --ptx saxpy.cu
 *
 * The resulting .ptx file is needed by the sample saxpyDrv.cpp.
 *
 * Copyright (c) 2012, Archaea Software, LLC.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

// SAXPY: accumulates in[i] * alpha into out[i] for every i < N.
// Uses a grid-stride loop, so any 1D launch configuration covers all N
// elements. extern "C" keeps the symbol unmangled for the driver-API loader.
extern "C" __global__ void
saxpy( float *out, const float *in, size_t N, float alpha )
{
    const size_t first  = blockIdx.x*blockDim.x + threadIdx.x;
    const size_t stride = blockDim.x*gridDim.x;

    for ( size_t i = first; i < N; i += stride ) {
        out[i] += in[i]*alpha;
    }
}
21,233
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>

/* Short-name aliases used throughout this file. */
typedef signed char schar;
typedef unsigned char uchar;
typedef short shrt;
typedef unsigned short ushrt;
typedef unsigned uint;
typedef unsigned long ulong;
typedef long long llong;
typedef unsigned long long ullong;
typedef float flt;
typedef double dbl;
typedef long double ldbl;

// Ordering functor for thrust::max_element: compares by absolute value, so
// the "maximum" element is the one with the largest magnitude
// (partial-pivot selection).
class Compare {
public:
    __host__ __device__ bool operator()(const dbl a, const dbl b) const {
        return fabs(a) < fabs(b);
    }
};

__global__ static void row_switching(dbl * __restrict__, uint, uint, uint);
__global__ static void column_to_null_down(dbl * __restrict__, uint, uint);
__global__ static void column_to_null_up(dbl * __restrict__, uint, uint);
__global__ static void solve(dbl * __restrict__, uint);
__host__ static uint matrix_in(dbl ** __restrict__);
__host__ static void matrix_out(dbl * __restrict__, uint);
__host__ static void gauss_jordan_elimination(dbl * __restrict__, uint) noexcept;

// Reads an n x n linear system (plus right-hand side) from stdin, solves it
// on the GPU by Gauss-Jordan elimination with partial pivoting, and prints
// the solution vector.
int main()
{
    dbl *matrix;
    const uint n = matrix_in(&matrix);
    gauss_jordan_elimination(matrix, n);
    matrix_out(matrix, n);
    return 0;
}

// Solves the system in place on the device.
// Storage layout (established by matrix_in): the augmented matrix is kept
// COLUMN-major -- element (row i, column j) lives at matrix[j * n + i] --
// and the right-hand side occupies matrix[n * n .. n * n + n - 1].
__host__ static void gauss_jordan_elimination(
    dbl * __restrict__ const host_matrix, const uint n) noexcept
{
    const Compare compare;
    const dim3 block = dim3(32U, 16U), thread = dim3(32U, 16U);
    dbl *device_matrix;
    cudaMalloc(&device_matrix, sizeof(dbl) * n * (n + 1));
    cudaMemcpy(device_matrix, host_matrix, sizeof(dbl) * n * (n + 1),
               cudaMemcpyHostToDevice);
    const thrust::device_ptr<dbl> ptr = thrust::device_pointer_cast(device_matrix);
    // Forward elimination with partial pivoting, column by column.
    for (uint i = 0; i < n - 1; ++i) {
        // Pivot row: largest |value| in column i, at or below the diagonal
        // (the scanned range [i*n+i, (i+1)*n) is that column's tail because
        // of the column-major layout).
        const uint max_idx = thrust::max_element(
            ptr + i * n + i, ptr + (i + 1) * n, compare) - ptr - i * n;
        if (max_idx != i) {
            row_switching<<<512U, 512U>>>(device_matrix, n, i, max_idx);
        }
        column_to_null_down<<<block, thread>>>(device_matrix, n, i);
    }
    // Back substitution: clear the upper triangle's effect on the RHS, then
    // divide by the diagonal.
    for (uint i = n - 1; i > 0; --i) {
        column_to_null_up<<<512U, 512U>>>(device_matrix, n, i);
    }
    solve<<<512U, 512U>>>(device_matrix, n);
    // Only the solution vector (the augmented column) is needed on the host.
    cudaMemcpy(host_matrix + n * n, device_matrix + n * n, sizeof(dbl) * n,
               cudaMemcpyDeviceToHost);
    cudaFree(device_matrix);
}

// Swaps rows i and j in columns k = i .. n (inclusive: column n is the RHS),
// one column per thread via a grid-stride loop. Column-major: (row r, col k)
// is m[k * n + r].
__global__ static void row_switching(dbl * __restrict__ const m, const uint n,
                                     const uint i, const uint j)
{
    const uint idx = threadIdx.x + blockDim.x * blockIdx.x,
               offset = blockDim.x * gridDim.x;
    for (uint k = i + idx; k <= n; k += offset) {
        const dbl temp = m[k * n + i];
        m[k * n + i] = m[k * n + j];
        m[k * n + j] = temp;
    }
}

// Eliminates column k below the diagonal: for every row i > k and every
// column j > k (including the RHS column j == n),
//   row_i -= (m[i][k] / m[k][k]) * row_k
// The sub-diagonal entries themselves are never stored back (they are simply
// no longer read), so only columns j > k are updated.
__global__ static void column_to_null_down(dbl * __restrict__ const m,
                                           const uint n, const uint k)
{
    const uint idxX = threadIdx.x + blockDim.x * blockIdx.x,
               idxY = threadIdx.y + blockDim.y * blockIdx.y,
               offsetX = blockDim.x * gridDim.x,
               offsetY = blockDim.y * gridDim.y;
    const dbl m_k_k = m[k * n + k];
    for (uint j = k + 1 + idxY; j <= n; j += offsetY) {
        for (uint i = k + 1 + idxX; i < n; i += offsetX) {
            // fma gives one correctly-rounded multiply-add per update.
            m[j * n + i] = fma(-m[k * n + i] / m_k_k, m[j * n + k], m[j * n + i]);
        }
    }
}

// Back-substitution step for pivot k: updates only the RHS entries of the
// rows above k, subtracting their column-k coefficient times the pivot row's
// RHS value (the triangular matrix itself is not modified).
__global__ static void column_to_null_up(dbl * __restrict__ const m,
                                         const uint n, const uint k)
{
    const uint idx = threadIdx.x + blockDim.x * blockIdx.x,
               offset = blockDim.x * gridDim.x;
    const dbl m_k_k = m[k * n + k], m_k_n = m[n * n + k];
    for (uint i = idx; i < k; i += offset) {
        m[n * n + i] = fma(-m[k * n + i] / m_k_k, m_k_n, m[n * n + i]);
    }
}

// Final scaling: divides each RHS entry by its diagonal pivot, leaving the
// solution vector in m[n * n .. n * n + n - 1].
__global__ static void solve(dbl * __restrict__ const m, const uint n)
{
    const uint idx = threadIdx.x + blockDim.x * blockIdx.x,
               offset = blockDim.x * gridDim.x;
    for (uint k = idx; k < n; k += offset) {
        m[n * n + k] /= m[k * n + k];
    }
}

// Reads n, then the n x n coefficients (row by row on input, stored
// column-major: element (i, j) at matrix[j * n + i]), then the n RHS values
// into matrix[n * n ..]. Allocates the buffer and hands ownership to the
// caller via matrix_ptr.
__host__ static uint matrix_in(dbl ** const __restrict__ matrix_ptr)
{
    uint n;
    scanf("%u", &n);
    dbl * const matrix = (dbl *) malloc(sizeof(dbl) * n * (n + 1));
    for (uint i = 0; i < n; ++i) {
        for (uint j = 0; j < n; ++j) {
            scanf("%lf", matrix + j * n + i);
        }
    }
    for (uint i = 0; i < n; ++i) {
        scanf("%lf", matrix + n * n + i);
    }
    *matrix_ptr = matrix;
    return n;
}

// Prints the solution vector (stored in the augmented column) and frees the
// matrix allocated by matrix_in.
__host__ static void matrix_out(dbl * __restrict__ const matrix, const uint n)
{
    for (uint i = 0; i < n; ++i) {
        printf("%.10le ", matrix[n * n + i]);
    }
    putchar('\n');
    free(matrix);
}
21,234
extern "C" {

// Accumulates t[i] * x[i] * (1 - x[i]) into z[i] for every element
// i < lengthX. One thread per element; surplus threads return immediately.
__global__ void tx1mx(const int lengthX, const double *t, const double *x, double *z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= lengthX)
        return;

    const double xi = x[i];
    z[i] += t[i] * xi * (1.0 - xi);
}

}
21,235
/** * Author: Zachaiah Bryant * Description: Generates the average of two polykov loops across * distances 1-16 for various SU(2) lattice configurations. */ // ******************* // * Headers * // ******************* #include <sys/stat.h> //For checking file existance #include <iostream> #include <fstream> #include <string> #include "./Headers/LattiCuda.cuh" using namespace std; // ************************************* // * Definition of Variables * // ************************************* #define LATTSIZE 16 #define BETA 2.5 #define CONFIGS 10 #define SEPARATION 10 // *************************** // * Function Headers * // *************************** /** * Checks for the existance of a file * @param name - Destination and name to look for */ inline bool exist(const std::string& name) { struct stat buffer; return (stat (name.c_str(), &buffer) == 0); } /** * Function for creating a unique file for the polykov loop * @return string of name and file location */ std::string polyname() { string name = "../Data/Polykov/PolyVsDist"; name += ".dat"; int *iter = new int; *iter = 0; while(exist(name)) { *iter += 1; //Gets rid of .dat if(*iter == 1) { for(int i = 0; i < 4; i++) { name.pop_back(); } } else if(*iter <= 10) { for(int i = 0; i <= 4; i++) { name.pop_back(); } } else if(*iter <= 100) { for(int i = 0; i <= 5; i++) { name.pop_back(); } } else{ for(int i = 0; i <= 6; i++) { name.pop_back(); } } name += std::to_string(*iter); name += ".dat"; } delete iter; std::cout << name << "\n"; return name; }; // ************************** // * Main Function * // ************************** int main() { LattiCuda model(LATTSIZE, BETA); fstream file; file.open(polyname(), ios::out | ios::trunc); //Open Pre-Thermalized Lattice model.load(); //Equilibrate the loaded lattice a couple times for(int i = 0; i < 10; i++){ model.equilibrate(); } //Generate a given amount of configs for(int i = 0; i < 1; i++) { cout << i << endl; //Equilibrate to separate measurements for(int e = 0; e < 
SEPARATION; e++) { model.equilibrate(); } //Gather the average of two polykov loops for different //distances for the config for(int dist = 1; dist <= 1; dist++) { file << -log(model.polykov(dist))/LATTSIZE << " "; } file << "\n"; file.flush(); } file.close(); return 0; }
21,236
/*----------------------------------------------------------------------------*/ /** * This confidential and proprietary software may be used only as * authorised by a licensing agreement from ARM Limited * (C) COPYRIGHT 2011-2012 ARM Limited * ALL RIGHTS RESERVED * * The entire notice above must be reproduced on all authorised * copies and copies may only be made to the extent permitted * by a licensing agreement from ARM Limited. * * @brief Soft IEEE-754 floating point library. */ /*----------------------------------------------------------------------------*/ #include "softfloat.cuh" #define SOFTFLOAT_INLINE /****************************************** helper functions and their lookup tables ******************************************/ /* count leading zeroes functions. Only used when the input is nonzero. */ #if defined(__GNUC__) && (defined(__i386) || defined(__amd64)) #elif defined(__arm__) && defined(__ARMCC_VERSION) #elif defined(__arm__) && defined(__GNUC__) #else /* table used for the slow default versions. */ static const uint8_t clz_table[256] = { 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; #endif /* 32-bit count-leading-zeroes function: use the Assembly instruction whenever possible. 
*/ SOFTFLOAT_INLINE uint32_t clz32(uint32_t inp) { #if defined(__GNUC__) && (defined(__i386) || defined(__amd64)) uint32_t bsr; __asm__("bsrl %1, %0": "=r"(bsr):"r"(inp | 1)); return 31 - bsr; #else #if defined(__arm__) && defined(__ARMCC_VERSION) return __clz(inp); /* armcc builtin */ #else #if defined(__arm__) && defined(__GNUC__) uint32_t lz; __asm__("clz %0, %1": "=r"(lz):"r"(inp)); return lz; #else /* slow default version */ uint32_t summa = 24; if (inp >= UInt32_C(0x10000)) { inp >>= 16; summa -= 16; } if (inp >= UInt32_C(0x100)) { inp >>= 8; summa -= 8; } return summa + clz_table[inp]; #endif #endif #endif } static SOFTFLOAT_INLINE uint32_t rtne_shift32(uint32_t inp, uint32_t shamt) { uint32_t vl1 = UInt32_C(1) << shamt; uint32_t inp2 = inp + (vl1 >> 1); /* added 0.5 ulp */ uint32_t msk = (inp | UInt32_C(1)) & vl1; /* nonzero if odd. '| 1' forces it to 1 if the shamt is 0. */ msk--; /* negative if even, nonnegative if odd. */ inp2 -= (msk >> 31); /* subtract epsilon before shift if even. */ inp2 >>= shamt; return inp2; } static SOFTFLOAT_INLINE uint32_t rtna_shift32(uint32_t inp, uint32_t shamt) { uint32_t vl1 = (UInt32_C(1) << shamt) >> 1; inp += vl1; inp >>= shamt; return inp; } static SOFTFLOAT_INLINE uint32_t rtup_shift32(uint32_t inp, uint32_t shamt) { uint32_t vl1 = UInt32_C(1) << shamt; inp += vl1; inp--; inp >>= shamt; return inp; } /* convert from FP16 to FP32. */ sf32 sf16_to_sf32(sf16 inp) { uint32_t inpx = inp; /* This table contains, for every FP16 sign/exponent value combination, the difference between the input FP16 value and the value obtained by shifting the correct FP32 result right by 13 bits. This table allows us to handle every case except denormals and NaN with just 1 table lookup, 2 shifts and 1 add. 
*/ static const int32_t tbl[64] = { Int32_C(0x80000000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x1C000), Int32_C(0x80038000), Int32_C(0x80038000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x54000), Int32_C(0x80070000) }; int32_t res = tbl[inpx >> 10]; res += inpx; /* the normal cases: the MSB of 'res' is not set. */ if (res >= 0) /* signed compare */ return res << 13; /* Infinity and Zero: the bottom 10 bits of 'res' are clear. */ if ((res & UInt32_C(0x3FF)) == 0) return res << 13; /* NaN: the exponent field of 'inp' is not zero; NaNs must be quitened. */ if ((inpx & 0x7C00) != 0) return (res << 13) | UInt32_C(0x400000); /* the remaining cases are Denormals. */ { uint32_t sign = (inpx & UInt32_C(0x8000)) << 16; uint32_t mskval = inpx & UInt32_C(0x7FFF); uint32_t leadingzeroes = clz32(mskval); mskval <<= leadingzeroes; return (mskval >> 8) + ((0x85 - leadingzeroes) << 23) + sign; } } /* Conversion routine that converts from FP32 to FP16. It supports denormals and all rounding modes. 
If a NaN is given as input, it is quietened. */ sf16 sf32_to_sf16(sf32 inp, roundmode rmode) { /* for each possible sign/exponent combination, store a case index. This gives a 512-byte table */ static const uint8_t tab[512] = { 0, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 50, 5, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 
45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 55, }; /* many of the cases below use a case-dependent magic constant. So we look up a magic constant before actually performing the switch. This table allows us to group cases, thereby minimizing code size. */ static const uint32_t tabx[60] = { UInt32_C(0), UInt32_C(0), UInt32_C(0), UInt32_C(0), UInt32_C(0), UInt32_C(0x8000), UInt32_C(0x80000000), UInt32_C(0x8000), UInt32_C(0x8000), UInt32_C(0x8000), UInt32_C(1), UInt32_C(0), UInt32_C(0), UInt32_C(0), UInt32_C(0), UInt32_C(0x8000), UInt32_C(0x8001), UInt32_C(0x8000), UInt32_C(0x8000), UInt32_C(0x8000), UInt32_C(0), UInt32_C(0), UInt32_C(0), UInt32_C(0), UInt32_C(0), UInt32_C(0x8000), UInt32_C(0x8000), UInt32_C(0x8000), UInt32_C(0x8000), UInt32_C(0x8000), UInt32_C(0xC8001FFF), UInt32_C(0xC8000000), UInt32_C(0xC8000000), UInt32_C(0xC8000FFF), UInt32_C(0xC8001000), UInt32_C(0x58000000), UInt32_C(0x38001FFF), UInt32_C(0x58000000), UInt32_C(0x58000FFF), UInt32_C(0x58001000), UInt32_C(0x7C00), UInt32_C(0x7BFF), UInt32_C(0x7BFF), UInt32_C(0x7C00), UInt32_C(0x7C00), UInt32_C(0xFBFF), UInt32_C(0xFC00), UInt32_C(0xFBFF), UInt32_C(0xFC00), UInt32_C(0xFC00), UInt32_C(0x90000000), UInt32_C(0x90000000), UInt32_C(0x90000000), UInt32_C(0x90000000), UInt32_C(0x90000000), UInt32_C(0x20000000), UInt32_C(0x20000000), UInt32_C(0x20000000), UInt32_C(0x20000000), UInt32_C(0x20000000) }; uint32_t p; uint32_t idx = rmode + tab[inp >> 23]; uint32_t vlx = tabx[idx]; switch (idx) { /* Positive number which may be Infinity or NaN. We need to check whether it is NaN; if it is, quieten it by setting the top bit of the mantissa. (If we don't do this quieting, then a NaN that is distinguished only by having its low-order bits set, would be turned into an INF. 
*/ case 50: case 51: case 52: case 53: case 54: case 55: case 56: case 57: case 58: case 59: /* the input value is 0x7F800000 or 0xFF800000 if it is INF. By subtracting 1, we get 7F7FFFFF or FF7FFFFF, that is, bit 23 becomes zero. For NaNs, however, this operation will keep bit 23 with the value 1. We can then extract bit 23, and logical-OR bit 9 of the result with this bit in order to quieten the NaN (a Quiet NaN is a NaN where the top bit of the mantissa is set.) */ p = (inp - 1) & UInt32_C(0x800000); /* zero if INF, nonzero if NaN. */ return ((inp + vlx) >> 13) | (p >> 14); /* positive, exponent = 0, round-mode == UP; need to check whether number actually is 0. If it is, then return 0, else return 1 (the smallest representable nonzero number) */ case 0: /* -inp will set the MSB if the input number is nonzero. Thus (-inp) >> 31 will turn into 0 if the input number is 0 and 1 otherwise. */ return (uint32_t) (-(int32_t) inp) >> 31; /* negative, exponent = , round-mode == DOWN, need to check whether number is actually 0. If it is, return 0x8000 ( float -0.0 ) Else return the smallest negative number ( 0x8001 ) */ case 6: /* in this case 'vlx' is 0x80000000. By subtracting the input value from it, we obtain a value that is 0 if the input value is in fact zero and has the MSB set if it isn't. We then right-shift the value by 31 places to get a value that is 0 if the input is -0.0 and 1 otherwise. */ return ((vlx - inp) >> 31) + UInt32_C(0x8000); /* for all other cases involving underflow/overflow, we don't need to do actual tests; we just return 'vlx'. */ case 1: case 2: case 3: case 4: case 5: case 7: case 8: case 9: case 10: case 11: case 12: case 13: case 14: case 15: case 16: case 17: case 18: case 19: case 40: case 41: case 42: case 43: case 44: case 45: case 46: case 47: case 48: case 49: return vlx; /* for normal numbers, 'vlx' is the difference between the FP32 value of a number and the FP16 representation of the same number left-shifted by 13 places. 
In addition, a rounding constant is baked into 'vlx': for rounding-away-from zero, the constant is 2^13 - 1, causing roundoff away from zero. for round-to-nearest away, the constant is 2^12, causing roundoff away from zero. for round-to-nearest-even, the constant is 2^12 - 1. This causes correct round-to-nearest-even except for odd input numbers. For odd input numbers, we need to add 1 to the constant. */ /* normal number, all rounding modes except round-to-nearest-even: */ case 30: case 31: case 32: case 34: case 35: case 36: case 37: case 39: return (inp + vlx) >> 13; /* normal number, round-to-nearest-even. */ case 33: case 38: p = inp + vlx; p += (inp >> 13) & 1; return p >> 13; /* the various denormal cases. These are not expected to be common, so their performance is a bit less important. For each of these cases, we need to extract an exponent and a mantissa (including the implicit '1'!), and then right-shift the mantissa by a shift-amount that depends on the exponent. The shift must apply the correct rounding mode. 'vlx' is used to supply the sign of the resulting denormal number. */ case 21: case 22: case 25: case 27: /* denormal, round towards zero. */ p = 126 - ((inp >> 23) & 0xFF); return (((inp & UInt32_C(0x7FFFFF)) + UInt32_C(0x800000)) >> p) | vlx; case 20: case 26: /* denornal, round away from zero. */ p = 126 - ((inp >> 23) & 0xFF); return rtup_shift32((inp & UInt32_C(0x7FFFFF)) + UInt32_C(0x800000), p) | vlx; case 24: case 29: /* denornal, round to nearest-away */ p = 126 - ((inp >> 23) & 0xFF); return rtna_shift32((inp & UInt32_C(0x7FFFFF)) + UInt32_C(0x800000), p) | vlx; case 23: case 28: /* denormal, round to nearest-even. 
*/
        /* shift amount: how far below the FP16 normal range this exponent sits */
        p = 126 - ((inp >> 23) & 0xFF);
        /* mantissa with the implicit leading 1 restored, shifted right with
           round-to-nearest-even; 'vlx' supplies the sign of the FP16 denormal */
        return rtne_shift32((inp & UInt32_C(0x7FFFFF)) + UInt32_C(0x800000), p) | vlx;
    }
    /* defensive fall-through: every idx value 0..59 is handled by a case above */
    return 0;
}

/* Union for type-punning between a 32-bit pattern and a native float. */
typedef union if32_
{
    uint32_t u; /* raw bit pattern */
    int32_t s;  /* same bits viewed as signed */
    float f;    /* same bits viewed as an IEEE-754 single */
} if32;

/* convert from soft-float to native-float */
float sf16_to_float(sf16 p)
{
    if32 i;
    /* widen the FP16 bit pattern to FP32 bits, then reinterpret as float */
    i.u = sf16_to_sf32(p);
    return i.f;
}

/* convert from native-float to softfloat */
sf16 float_to_sf16(float p, roundmode rm)
{
    if32 i;
    /* grab the raw FP32 bit pattern, then narrow to FP16 under rounding mode 'rm' */
    i.f = p;
    return sf32_to_sf16(i.u, rm);
}
21,237
#include <stdio.h>
#include <stdlib.h>  /* atoi — was missing; <stdio.h> alone does not declare it */

/*
 * Trivial kernel: each thread prints its thread/block coordinates,
 * proving that the selected device can actually execute work.
 * Launch layout: any 1-D grid (launched below as <<<2, 2>>>).
 */
__global__ void hello()
{
    printf("hello world, Im thread %d on block %d\n", threadIdx.x, blockIdx.x);
}

/*
 * Usage: set_device.o <device-id> <task-id>
 * Binds the process to the requested CUDA device, reports the device's
 * properties, then launches a small kernel on it.
 * Returns 0 on success, 1 on any error.
 */
int main(int argc, char *argv[])
{
    int deviceId, nDevices, taskID;
    cudaError_t err;
    cudaDeviceProp prop;

    if (argc == 3) {
        printf("The argument supplied are device-ID: %s and task-ID: %s\n", argv[1], argv[2]);
        deviceId = atoi(argv[1]);
        taskID = atoi(argv[2]);
    } else {
        printf("Usage: set_device.o device-id-integer n-taks-integer\n");
        return 1;
    }

    err = cudaSetDevice(deviceId);
    if (err != cudaSuccess) {
        printf("API error %s:%d Returned:%d\n", __FILE__, __LINE__, err);
        return 1;
    }
    printf("Task id: %d - Desired DeviceId: %d\n", taskID, deviceId);

    /* these calls were previously unchecked */
    err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess) {
        printf("API error %s:%d Returned:%d\n", __FILE__, __LINE__, err);
        return 1;
    }
    printf("Number of devices available: %d\n", nDevices);

    err = cudaGetDeviceProperties(&prop, deviceId);
    if (err != cudaSuccess) {
        printf("API error %s:%d Returned:%d\n", __FILE__, __LINE__, err);
        return 1;
    }
    printf("Device Number: %d\n", deviceId);
    printf(" Device name: %s\n", prop.name);
    printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
    printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
    /* clock in kHz; x2 for DDR, /8 bits->bytes, /1e6 KB->GB */
    printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
           2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);

    printf("Finally...");
    int num_threads = 2;
    int num_blocks = 2;
    hello<<<num_blocks, num_threads>>>();
    err = cudaGetLastError();           /* catch launch-configuration errors */
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();  /* flush device printf, catch execution errors */
    if (err != cudaSuccess) {
        printf("API error %s:%d Returned:%d\n", __FILE__, __LINE__, err);
        return 1;
    }
    return 0;
}
21,238
// NOTE(review): a large commented-out copy of the first-generation
// implementation (energy-map based generateParticles/renderParticles/
// updateParticles) previously lived here.  The dead copy had become
// garbled by whitespace mangling and duplicated the live code below,
// so it has been removed; recover it from version control if needed.

#include "simple_particle.cuh"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "math_functions.h"
#include <stdio.h>

// Particle-system descriptor mirrored into device constant memory by
// copy_to_device_sps(); every kernel reads its configuration from here.
__constant__ simpleParticleSystem d_sps[1];

__global__ void generateParticles(float2 point_pos);
__global__ void renderParticles(uchar4* devPtr, int img_width, int img_height);
__global__ void updateParticles(float passed_time);
__device__ void draw_circle(uchar4* devPtr, float *map_remain, float2 pos, float size, uchar4 color, float remain_time, int img_width, int img_height);
__device__ float2 get_normal_vector(float rand_num);
__device__ float2 get_acceleration(float2 position, simpleParticleSystem *sps, int pid);
__device__ void update_particle_velocity(float2 &velocity, float2 acc, float passed_time);
__device__ int update_particle_position(int index, float passed_time); // returns whether the particle is still alive
__device__ void update_particle_size(simpleParticleSystem *sps, float resize_speed, int pid, float passed_time);

// Allocates every per-particle device array plus the remain-time map and the
// vortex field.  image_width/image_height are currently unused but kept for
// interface compatibility with existing callers.
void init_particles_cuda(simpleParticleSystem &sps, int image_width, int image_height)
{
    int max_num_particles = sps.MAX_PARTICLE_NUM;
    int one_batch_num_particles = sps.ONE_BATCH_PARTICLE_NUM;

    cudaMalloc((void**)&sps.position, sizeof(*sps.position) * max_num_particles);
    cudaMalloc((void**)&sps.velocity, sizeof(*sps.velocity) * max_num_particles);
    cudaMalloc((void**)&sps.radius, sizeof(*sps.radius) * max_num_particles);
    cudaMalloc((void**)&sps.color_rgba, sizeof(*sps.color_rgba) * max_num_particles);
    cudaMalloc((void**)&sps.remain_time, sizeof(*sps.remain_time) * max_num_particles);
    // 4 batches of random values: color, orientation, speed, factor/size/life.
    cudaMalloc((void**)&sps.rand_data, sizeof(*sps.rand_data) * one_batch_num_particles * 4);
    cudaMalloc((void**)&sps.map_remain, sizeof(*sps.map_remain) * sps.MAP_WIDTH * sps.MAP_HEIGHT);
    cudaMalloc((void**)&sps.vortex_field, sizeof(*sps.vortex_field) * sps.VORTEX_WIDTH * sps.VORTEX_HEIGHT);
    cudaMalloc((void**)&sps.influence_factor, sizeof(*sps.influence_factor) * max_num_particles);

    // generateParticles() treats remain_time == 0 as "slot free"; the old
    // code left this buffer uninitialized, so free-slot detection started
    // from garbage.  Zero it explicitly.
    cudaMemset(sps.remain_time, 0, sizeof(*sps.remain_time) * max_num_particles);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Memory Allocation Error: %s\n", cudaGetErrorString(err));
}

// Builds a swirl-plus-inward-pull vector field on the host and uploads it.
// Each cell holds (rotated - radial/2) offsets, normalized by half the
// field extent.
void init_vortex_field(simpleParticleSystem &sps)
{
    float2 *h_vortex = new float2[sps.VORTEX_WIDTH * sps.VORTEX_HEIGHT];
    int index = 0;
    for (int y = 0; y < sps.VORTEX_HEIGHT; ++y) {
        for (int x = 0; x < sps.VORTEX_WIDTH; ++x) {
            // v0: offset from the field center; v1: v0 rotated 90 degrees.
            float2 v0 = make_float2(x - sps.VORTEX_WIDTH / 2, y - sps.VORTEX_HEIGHT / 2);
            float2 v1 = make_float2(v0.y, -v0.x);
            h_vortex[index] = make_float2((v1.x - v0.x / 2) / (sps.VORTEX_WIDTH / 2),
                                          (v1.y - v0.y / 2) / (sps.VORTEX_HEIGHT / 2));
            index += 1;
        }
    }
    cudaMemcpy(sps.vortex_field, h_vortex,
               sps.VORTEX_WIDTH * sps.VORTEX_HEIGHT * sizeof(*h_vortex),
               cudaMemcpyHostToDevice);
    delete[] h_vortex;
}

// Releases every device buffer allocated by init_particles_cuda().
void destroy_particles_cuda(simpleParticleSystem &sps)
{
    cudaFree(sps.position);
    cudaFree(sps.velocity);
    cudaFree(sps.radius);
    cudaFree(sps.color_rgba);
    cudaFree(sps.remain_time);
    cudaFree(sps.rand_data);
    cudaFree(sps.map_remain);
    cudaFree(sps.vortex_field);
    cudaFree(sps.influence_factor);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Memory Free Error: %s\n", cudaGetErrorString(err));
}

// Mirrors the host-side descriptor (including its device pointers) into the
// d_sps constant-memory symbol.  Must be called after init_particles_cuda().
void copy_to_device_sps(simpleParticleSystem &sps)
{
    cudaError_t err = cudaMemcpyToSymbol(d_sps, &sps, sizeof(simpleParticleSystem));
    if (err != cudaSuccess)
        printf("Constant Memory Copy Error: %s\n", cudaGetErrorString(err));
}

// Spawns one batch of generate_size particles at point_pos.
// Launch layout: a single block, one thread per spawned particle.
void generate_particles(int generate_size, float2 point_pos)
{
    generateParticles<<<1, generate_size>>>(point_pos);
}

// Advances the whole particle pool by passed_time seconds.
// NOTE(review): name kept as-is ("updata") for caller compatibility.
void updata_particles(int generate_size, float passed_time)
{
    updateParticles<<<1, generate_size>>>(passed_time);
}

// Clears the output image and the remain-time map, then rasterizes all
// living particles into devPtr.
void render_particles(uchar4* devPtr, int img_width, int img_height)
{
    // d_sps is a __constant__ DEVICE symbol — it cannot be dereferenced on
    // the host (the old code did exactly that).  Fetch a host-side copy of
    // the descriptor to obtain the map_remain device pointer.
    simpleParticleSystem h_sps;
    cudaMemcpyFromSymbol(&h_sps, d_sps, sizeof(simpleParticleSystem));

    cudaMemset(h_sps.map_remain, 0, img_width * img_height * sizeof(*h_sps.map_remain));
    cudaMemset(devPtr, 0, img_width * img_height * sizeof(*devPtr));

    int thread_dim = 24;
    int grid_dim = 16;
    dim3 grids(grid_dim);
    dim3 threads(thread_dim);
    renderParticles<<<grids, threads>>>(devPtr, img_width, img_height);
}

// One block, one thread per particle-to-spawn.  Thread 0 scans remain_time
// at batch granularity for the first free batch; every thread then fills one
// particle of that batch: spawn position point_pos, random colour, velocity,
// influence factor, radius and lifetime.
__global__ void generateParticles(float2 point_pos)
{
    float2 *position = (*d_sps).position;
    float2 *velocity = (*d_sps).velocity;
    float *radius = (*d_sps).radius;
    uchar4 *color_rgba = (*d_sps).color_rgba;
    float *remain_time = (*d_sps).remain_time;
    float *rand = (*d_sps).rand_data;
    float *influence_factor = (*d_sps).influence_factor;
    int generate_size = (*d_sps).ONE_BATCH_PARTICLE_NUM;
    int max_size = (*d_sps).MAX_PARTICLE_NUM;

    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;

    // Highest batch start that still leaves room for a whole batch.
    // Computed uniformly: the old code decremented a local max_size in
    // thread 0 only, so when the pool was full the other threads compared
    // against the wrong bound and could write past the arrays.
    unsigned int last_start = (unsigned int)(max_size - generate_size);

    __shared__ unsigned int generate_start_index;

    if (index == 0) {
        generate_start_index = 0;
        while (generate_start_index <= last_start) {
            if (remain_time[generate_start_index] == 0)
                break; // free batch found
            generate_start_index += generate_size;
        }
    }
    __syncthreads();

    if (generate_start_index > last_start) {
        if (index == 0)
            printf("generate manle\n"); // pool exhausted
        return;
    }

    int pid = generate_start_index + index;

    // All particles of the batch spawn at the emitter point.
    position[pid] = point_pos;

    // Random colour: yellow or cyan with equal probability.
    float rand_color = rand[index];
    if (rand_color > 0.5f)
        color_rgba[pid] = make_uchar4(255, 255, 0, 128);
    else
        color_rgba[pid] = make_uchar4(0, 255, 255, 128);

    // Random direction (unit vector from an angle) and speed.
    rand += generate_size;
    float2 velocity_orientation = get_normal_vector(rand[index]);

    rand += generate_size;
    float n_velocity = rand[index] * ((*d_sps).MAX_VELOCITY - (*d_sps).MIN_VELOCITY) + (*d_sps).MIN_VELOCITY;
    velocity[pid].x = n_velocity * velocity_orientation.x;
    velocity[pid].y = n_velocity * velocity_orientation.y;

    // A single shared random value drives influence factor, radius and
    // lifetime (rand_data only holds 4 batches, so the three are correlated).
    rand += generate_size;
    influence_factor[pid] = rand[index] * ((*d_sps).MAX_INFLU_FACTOR - (*d_sps).MIN_INFLU_FACTOR) + (*d_sps).MIN_INFLU_FACTOR;
    radius[pid] = rand[index] * ((*d_sps).MAX_RADIUS - (*d_sps).MIN_RADIUS) + (*d_sps).MIN_RADIUS;
    remain_time[pid] = rand[index] * ((*d_sps).MAX_LIFE_TIME - (*d_sps).MIN_LIFE_TIME) + (*d_sps).MIN_LIFE_TIME;
}

// Paints every living particle as a filled circle.  Batches are distributed
// round-robin over blocks; particles within a batch over the block's threads.
// Particle 0 of each batch is a sentinel (its remain_time flags whether the
// batch is in use) and is never drawn.
__global__ void renderParticles(uchar4* devPtr, int img_width, int img_height)
{
    int batch_size = (*d_sps).ONE_BATCH_PARTICLE_NUM;
    int max_size = (*d_sps).MAX_PARTICLE_NUM;

    int batch_outer_start = blockIdx.x * batch_size;
    int batch_outer_step = gridDim.x * batch_size;

    // The previous bound ('< max_size - batch_outer_step') silently skipped
    // the trailing batches whenever more than one block was launched; cover
    // every complete batch instead.
    for (int batch_start = batch_outer_start; batch_start + batch_size <= max_size; batch_start += batch_outer_step) {
        if ((*d_sps).remain_time[batch_start] <= 0)
            continue; // whole batch inactive

        for (int inner = threadIdx.x; inner < batch_size; inner += blockDim.x) {
            if (inner == 0)
                continue; // sentinel particle is never drawn
            int id = batch_start + inner;
            float this_remain_time = (*d_sps).remain_time[id];
            if (this_remain_time > 0) {
                draw_circle(devPtr, (*d_sps).map_remain,
                            (*d_sps).position[id], (*d_sps).radius[id],
                            (*d_sps).color_rgba[id], this_remain_time,
                            img_width, img_height);
            }
        }
    }
}

// Advances size, velocity (via the vortex field) and position of every
// particle, then updates each batch's sentinel: remain_time[start] becomes 0
// when no particle of the batch is still alive, 1 otherwise.
// Barriers were previously inside divergent control flow and the shared
// counter was written without initialization ordering or atomics; the loop
// now iterates on the block-uniform start_index and uses atomicAdd.
__global__ void updateParticles(float passed_time)
{
    unsigned int tid = threadIdx.x;
    unsigned int index = blockIdx.x * blockDim.x + tid;
    unsigned int stride = gridDim.x * blockDim.x;
    unsigned int start_index = blockIdx.x * blockDim.x; // batch sentinel for this block

    __shared__ int living_particle_num;

    while (start_index < (*d_sps).MAX_PARTICLE_NUM) {
        if (tid == 0)
            living_particle_num = 0;
        __syncthreads();

        // Uniform across the block: all threads see the same sentinel.
        int batch_alive = ((*d_sps).remain_time[start_index] != 0);

        if (batch_alive && index < (*d_sps).MAX_PARTICLE_NUM && index != start_index) {
            update_particle_size(d_sps, -0.4f, index, passed_time);
            float2 acc = get_acceleration((*d_sps).position[index], d_sps, index);
            update_particle_velocity((*d_sps).velocity[index], acc, passed_time);
            if (update_particle_position(index, passed_time))
                atomicAdd(&living_particle_num, 1);
        }
        __syncthreads();

        if (batch_alive && tid == 0)
            (*d_sps).remain_time[start_index] = (living_particle_num == 0) ? 0.0f : 1.0f;

        index += stride;
        start_index += stride;
    }
}

// Rasterizes a filled circle of radius 'size' at 'pos', clipped to the image;
// stamps 'remain_time' into map_remain and 'color' into devPtr per pixel.
// NOTE(review): concurrent particles overlapping the same pixel race on these
// plain stores (no atomics) — last writer wins, as in the original.
__device__ void draw_circle(uchar4* devPtr, float *map_remain, float2 pos, float size, uchar4 color, float remain_time, int img_width, int img_height)
{
    float size_square = size * size;
    for (int y = (int)(pos.y - size); y < pos.y + size + 1; ++y) {
        if (y < 0 || y >= img_height)
            continue;
        for (int x = (int)(pos.x - size); x < pos.x + size + 1; ++x) {
            if (x < 0 || x >= img_width)
                continue;
            float dist_square = (x - pos.x) * (x - pos.x) + (y - pos.y) * (y - pos.y);
            if (dist_square < size_square) {
                int offset = x + y * img_width;
                map_remain[offset] = remain_time;
                devPtr[offset] = color;
            }
        }
    }
}

// Maps rand_num in [0,1) to a unit vector at angle rand_num * 2*pi.
__device__ float2 get_normal_vector(float rand_num)
{
    float x, y;
    sincosf(rand_num * 2 * PI, &y, &x);
    return make_float2(x, y);
}

// Maps a normalized energy to an orange-ish RGBA colour; 0 is transparent
// black.  (Retained from the energy-map renderer; currently uncalled here.)
__device__ uchar4 get_color_from_energy(float energy)
{
    if (energy == 0)
        return make_uchar4(0, 0, 0, 0);

    unsigned char r = 90 * energy + 160;
    unsigned char g = 180 * energy;
    unsigned char b = 60 * energy;
    unsigned char w = 255 * energy;
    return make_uchar4(r, g, b, w);
}

// Looks up the vortex-field cell under 'position' and scales it by the
// particle's influence factor.  Indices are clamped so a position exactly on
// (or transiently past) the map edge cannot read out of range.
__device__ float2 get_acceleration(float2 position, simpleParticleSystem *sps, int pid)
{
    int vortex_x = (int)(position.x / sps->MAP_WIDTH * sps->VORTEX_WIDTH);
    int vortex_y = (int)(position.y / sps->MAP_HEIGHT * sps->VORTEX_HEIGHT);
    if (vortex_x < 0) vortex_x = 0;
    if (vortex_x >= sps->VORTEX_WIDTH) vortex_x = sps->VORTEX_WIDTH - 1;
    if (vortex_y < 0) vortex_y = 0;
    if (vortex_y >= sps->VORTEX_HEIGHT) vortex_y = sps->VORTEX_HEIGHT - 1;

    int cell = vortex_y * sps->VORTEX_WIDTH + vortex_x;
    float factor = sps->influence_factor[pid];
    return make_float2(sps->vortex_field[cell].x * factor,
                       sps->vortex_field[cell].y * factor);
}

// Euler-integrates the velocity by one acceleration step.
__device__ void update_particle_velocity(float2 &velocity, float2 acc, float passed_time)
{
    velocity.x += acc.x * passed_time;
    velocity.y += acc.y * passed_time;
}

// Ages the particle and integrates its position; kills it (remain_time = 0)
// when its lifetime expires or it leaves the map.  Returns 1 while alive.
__device__ int update_particle_position(int index, float passed_time)
{
    (*d_sps).remain_time[index] -= passed_time;
    if ((*d_sps).remain_time[index] <= 0) {
        (*d_sps).remain_time[index] = 0;
        return 0;
    }

    float2 *pos = &(*d_sps).position[index];
    (*pos).x += (*d_sps).velocity[index].x * passed_time;
    (*pos).y += (*d_sps).velocity[index].y * passed_time;

    if ((*pos).x < (*d_sps).MAP_WIDTH && (*pos).x >= 0 &&
        (*pos).y >= 0 && (*pos).y < (*d_sps).MAP_HEIGHT) {
        return 1;
    }

    (*d_sps).remain_time[index] = 0; // left the map
    return 0;
}

// Shrinks (resize_speed < 0) or grows the particle, clamped to radius >= 1.
__device__ void update_particle_size(simpleParticleSystem *sps, float resize_speed, int pid, float passed_time)
{
    sps->radius[pid] += resize_speed * passed_time;
    if (sps->radius[pid] < 1)
        sps->radius[pid] = 1;
}
21,239
#include <stdio.h> // for printf

#define N 64   // total number of input points
#define TPB 32 // threads per block (must divide N for the launch below)

// Maps i in [0, n-1] onto n evenly spaced floats covering [0, 1].
// Precondition: n > 1 (divides by n - 1).
float scale(int i, int n)
{
    return ((float) i) / (n - 1);
}

// Distance between two points on a line, i.e. |x2 - x1|.
// fabsf is exact and stays in single precision, unlike the previous
// sqrt((x2-x1)*(x2-x1)) which round-tripped through a double sqrt.
__device__ float distance(float x1, float x2)
{
    return fabsf(x2 - x1);
}

// One thread per element: d_out[i] = |d_in[i] - ref|.
// Launch layout: 1-D grid of N/TPB blocks x TPB threads; this covers N
// exactly because TPB divides N, so no tail guard is required here.
__global__ void distanceKernel(float *d_out, float *d_in, float ref)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const float x = d_in[i];
    d_out[i] = distance(x, ref);
    printf("i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]);
}

// Fills a managed input array with N evenly spaced values, computes each
// value's distance to 0.5 on the device, and reports errors.
int main()
{
    float ref = 0.5f;

    float *in = 0;
    float *out = 0;

    // Managed memory: reachable from the host init loop and the kernel.
    // These allocations were previously unchecked.
    if (cudaMallocManaged(&in, N * sizeof(float)) != cudaSuccess ||
        cudaMallocManaged(&out, N * sizeof(float)) != cudaSuccess) {
        fprintf(stderr, "cudaMallocManaged failed\n");
        return 1;
    }

    for (int i = 0; i < N; i++) {
        in[i] = scale(i, N);
    }

    distanceKernel<<<N / TPB, TPB>>>(out, in, ref);
    cudaError_t err = cudaGetLastError(); // launch-configuration errors
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();    // in-kernel execution errors
    if (err != cudaSuccess)
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));

    cudaFree(in);
    cudaFree(out);
    return err == cudaSuccess ? 0 : 1;
}
21,240
#include <stdio.h>
#include <stdlib.h>

/*
 * Reads 'filename' into a freshly malloc'd buffer padded up to the next
 * multiple of 16 bytes.  Each pad byte holds the pad length (PKCS#7 style)
 * so writeToFile() can strip it again; a file whose size is already a
 * multiple of 16 receives no padding.  One extra NUL byte is appended after
 * the padded data as a terminator.
 *
 * On success *memory owns the buffer and the padded size is returned
 * (NOTE(review): the int return truncates files larger than INT_MAX, as in
 * the original interface).  On failure *memory is set to NULL and a negative
 * value is returned:
 *   -1  the file could not be opened or its size could not be determined
 *   -2  allocation failed or the read came up short
 */
int loadFileIntoMemory(char **memory, const char *filename)
{
    *memory = NULL;

    FILE *fp = fopen(filename, "rb");
    if (fp == NULL)
        return -1;

    /* Determine the file size by seeking to the end. */
    if (fseek(fp, 0, SEEK_END) != 0) {
        fclose(fp);
        return -1;
    }
    long end = ftell(fp);
    if (end < 0) { /* ftell failure was previously unchecked */
        fclose(fp);
        return -1;
    }
    fseek(fp, 0, SEEK_SET);

    size_t data_size = (size_t)end;
    /* Bytes needed to reach a 16-byte boundary (0 when already aligned);
       numerically identical to the old unsigned "(-file_size) % 16". */
    size_t pad = (16 - data_size % 16) % 16;
    size_t file_size = data_size + pad;

    *memory = (char *)malloc(file_size + 1); /* +1 for the trailing NUL */
    if (*memory == NULL) {
        fclose(fp);
        return -2;
    }

    /* Read the raw contents.  The previous version leaked 'fp' and left
       *memory dangling on a short read. */
    if (fread(*memory, sizeof(char), data_size, fp) != data_size) {
        free(*memory);
        *memory = NULL;
        fclose(fp);
        return -2;
    }
    fclose(fp);

    /* Fill each pad byte with the pad length. */
    for (size_t i = 0; i < pad; ++i)
        (*memory)[data_size + i] = (char)pad;

    /* NUL terminator so the buffer can double as a C string. */
    (*memory)[file_size] = 0;

    return (int)file_size;
}

/*
 * Writes 'file_size' bytes of 'memory' to 'filename', first stripping the
 * PKCS#7-style padding added by loadFileIntoMemory() when the trailing run
 * of identical bytes has length exactly equal to that byte's value.
 * NOTE(review): unpadded data that legitimately ends with such a run is
 * indistinguishable from padding — inherent to this scheme.
 * Returns 0 on success, -1 if the file cannot be opened.
 */
int writeToFile(char *memory, const char *filename, size_t file_size)
{
    FILE *file = fopen(filename, "wb");
    if (file == NULL) /* was previously unchecked */
        return -1;

    if (file_size > 0) {
        char possible_pad = memory[file_size - 1];
        /* Count the trailing run without scanning before the buffer start
           (the previous version could walk past it, and its 'char' counter
           could overflow on long runs). */
        size_t run = 0;
        while (run < file_size && memory[file_size - run - 1] == possible_pad)
            run++;
        /* Only strip when the run length matches a plausible pad value. */
        if (possible_pad > 0 && (size_t)possible_pad == run)
            file_size -= (size_t)possible_pad;
    }

    fwrite(memory, sizeof(char), file_size, file);
    fclose(file);
    return 0;
}
21,241
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define N 512          // allocation size of host/device buffers (over-allocated)
#define MAX_ERR 1e-6
#define VEC_LEN 4      // number of elements the demo actually uses

// Prints the first VEC_LEN entries of C in the form "[a,b,c,d,]".
void print_results(float *C){
    printf("[");
    for(int i = 0 ; i < VEC_LEN; i++){
        printf("%f,", C[i]);
    }
    printf("]\n");
}

// Element-wise addition: CUDA_C[i] = CUDA_A[i] + CUDA_B[i], one thread per element.
__global__ void vector_add(float *CUDA_A, float *CUDA_B, float *CUDA_C, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        CUDA_C[tid] = CUDA_A[tid] + CUDA_B[tid];
}

// Element-wise subtraction: CUDA_C[i] = CUDA_A[i] - CUDA_B[i].
__global__ void vector_sub(float *CUDA_A, float *CUDA_B, float *CUDA_C, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        CUDA_C[tid] = CUDA_A[tid] - CUDA_B[tid];
}

// Dot product of the first 4 elements. Writes the element-wise products into
// CUDA_C and the scalar result into *CUDA_K.
// NOTE: this reduction is hard-wired for a single block of exactly 4 threads
// (it only ever reads SHARED_K[0] and SHARED_K[1]); launches with any other
// configuration would give wrong results.
__global__ void vector_dot_product(float *CUDA_A, float *CUDA_B, float *CUDA_C, float *CUDA_K, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float SHARED_K[1024];
    CUDA_C[tid] = CUDA_A[tid] * CUDA_B[tid];
    __syncthreads();
    // Pairwise partial sums; only tid 0 and 1 contribute to the result.
    SHARED_K[tid] = CUDA_C[tid * 2] + CUDA_C[tid * 2 + 1];
    __syncthreads();
    if (tid == 0){
        *CUDA_K = SHARED_K[0] + SHARED_K[1];
    }
}

// Prints the input vectors; kept in sync with A and B in main()
// (the original strings did not match the actual data).
void print_vectors(){
    printf("A = {2.0,4.0,6.0,10.0}\n");
    printf("B = {1.0,2.0,3.0,8.0}\n");
};

int main(){
    float *C, *K;
    float *CUDA_A, *CUDA_B, *CUDA_C, *CUDA_K;

    // Host data: two 4-element vectors.
    float A[VEC_LEN] = {2.0,4.0,6.0,10.0};
    float B[VEC_LEN] = {1.0,2.0,3.0,8.0};
    C = (float*)malloc(sizeof(float) * N);
    K = (float*)malloc(sizeof(float));

    // Allocate device memory. The original allocated CUDA_C twice,
    // leaking the first allocation.
    cudaMalloc((void**)&CUDA_A, sizeof(float) * N);
    cudaMalloc((void**)&CUDA_B, sizeof(float) * N);
    cudaMalloc((void**)&CUDA_C, sizeof(float) * N);
    cudaMalloc((void**)&CUDA_K, sizeof(float));

    // Copy only the VEC_LEN initialized elements; the original copied
    // sizeof(float)*N from the 4-element stack arrays (out-of-bounds read).
    cudaMemcpy(CUDA_A, A, sizeof(float) * VEC_LEN, cudaMemcpyHostToDevice);
    cudaMemcpy(CUDA_B, B, sizeof(float) * VEC_LEN, cudaMemcpyHostToDevice);

    // Executing kernels: one block of VEC_LEN threads each.
    vector_add<<<1,VEC_LEN>>>(CUDA_A, CUDA_B, CUDA_C, N);
    cudaMemcpy(C, CUDA_C, sizeof(float) * VEC_LEN, cudaMemcpyDeviceToHost);
    puts("ADDING");
    print_vectors();
    print_results(C);

    vector_sub<<<1,VEC_LEN>>>(CUDA_A, CUDA_B, CUDA_C, N);
    cudaMemcpy(C, CUDA_C, sizeof(float) * VEC_LEN, cudaMemcpyDeviceToHost);
    puts("SUBSTRACTING");
    print_vectors();
    print_results(C);

    vector_dot_product<<<1,VEC_LEN>>>(CUDA_A, CUDA_B, CUDA_C, CUDA_K, N);
    cudaMemcpy(C, CUDA_C, sizeof(float) * VEC_LEN, cudaMemcpyDeviceToHost);
    puts("DOT_PRODUCT");
    print_vectors();
    print_results(C);

    cudaMemcpy(K, CUDA_K, sizeof(float), cudaMemcpyDeviceToHost);
    printf("\nDot product result %f\n", *K);

    // Deallocate device memory (CUDA_K was leaked before).
    cudaFree(CUDA_A);
    cudaFree(CUDA_B);
    cudaFree(CUDA_C);
    cudaFree(CUDA_K);

    // Deallocate host memory (K was leaked before).
    free(C);
    free(K);
}
21,242
/*
 * gpuMerge.cu
 *
 *  Created on: Dec 16, 2018
 *      Author: Orai Dezso Gergely
 */

#include "gpuMerge.cuh"
#include <iostream>
#include <stdio.h>

static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)

// Minimum CUDA runtime version / compute capability accepted by findCudaDevice.
#define MIN_RUNTIME_VERSION 1000
#define MIN_COMPUTE_VERSION 0x10

// Device-derived launch parameters; defaults are overwritten by findCudaDevice().
int MaxThread = 512;
int BlockNum=2,CoreInBlock=128;

/**
 * Merges two adjacent sorted runs of `length` elements starting at l and r
 * into `to`. `end` clips both runs so the final, partial run is handled.
 */
void cmerge(float *l, float *r, float *to, float *end, int length){
    //int length = r - l;
    float *lend=(r<end) ? r : end;
    float *rend=(r+length <end) ? r+length : end;
    while(true){
        if(l==lend){
            // Left run exhausted: copy the rest of the right run.
            while(r<rend){
                *to++=*r++;
            }
            break;
        }
        if(r>=rend){
            // Right run exhausted: copy the rest of the left run.
            while(l<lend){
                *to++=*l++;
            }
            break;
        }
        *to++ = (*l < *r) ? *l++ : *r++;
    }
}

/**
 * Bottom-up merge sort on the host. `length` is the initial run length
 * (1 for unsorted input; larger when the data is already partially sorted).
 * Ping-pongs between `data` and a temp buffer, copying back if the final
 * result landed in the temp buffer.
 */
void cpuMergeSort(float *data, unsigned int size, int length=1)
{
    float *tmp = new float[size];
    float *akt = data;
    float *next = tmp;
    for (; length < size; length *= 2){
        float *end=akt+size;
        for(unsigned col = 0; col< size; col+=2*length){
            cmerge(akt + col, akt + col + length, next + col, end, length);
        }
        // Swap source and destination buffers for the next pass.
        float *c = akt;
        akt=next;
        next=c;
    }
    if(akt!=data)for(unsigned i=0;i<size;++i)data[i]=akt[i];
    delete[] tmp;
}

/**
 * CUDA kernel what merges two float arrays
 * (device-side twin of cmerge above).
 */
__device__ void kernelMerge(float *l, float *r, float *to, float *end, int length){
    float *lend=(r<end) ? r : end;
    float *rend=(r+length <end) ? r+length : end;
    while(true){
        if(l==lend){
            while(r<rend){
                *to++=*r++;
            }
            break;
        }
        if(r>=rend){
            while(l<lend){
                *to++=*l++;
            }
            break;
        }
        *to++ = (*l < *r) ? *l++ : *r++;
    }
}

/**
 * CUDA kernel that sorts a float array
 * Each thread bottom-up merge sorts its own section of `size` elements,
 * using the matching section of tmpIn as scratch space.
 */
__global__ void gpuKernelMergeSort(float *data, float *tmpIn, unsigned int fullSize, unsigned int size, unsigned int length=1)
{
    unsigned idx = blockIdx.x*blockDim.x+threadIdx.x;
    float *tmp = tmpIn + (idx * size);
    float *akt = data + (idx * size);
    // The size of the last section is diferent so we have to check it
    // NOTE(review): this condition is true for EVERY thread whose section
    // starts inside the array (idx*size < fullSize), so every thread's
    // section is extended to the end of the array, overlapping the others.
    // It looks like only the final partial section was meant to be clamped
    // (i.e. when akt + size > data + fullSize) — confirm intent.
    if(data+fullSize > akt) size = (data + fullSize) - akt;
    float *next = tmp;
    for (; length < size; length *= 2){
        float *end=akt+size;
        for(unsigned col = 0; col< size; col+=2*length){
            kernelMerge(akt + col, akt + col + length, next + col, end, length);
        }
        float *c = akt;
        akt=next;
        next=c;
    }
    // NOTE(review): suspected bug — this copies to data[i] rather than back
    // to this thread's own section (data[idx*size + i]); also `size` was
    // reassigned above, so data+(idx*size) may no longer be the section start.
    if(akt != data+(idx*size))for(unsigned i=0;i<size;++i)data[i]=akt[i];
}

/**
 * Host function that copies the data and launches the work on GPU.
 * Small inputs (< 4 elements per GPU thread) fall back to the CPU sort.
 * Pass 1 sorts per-thread sections; pass 2 (single thread) merges them.
 */
void gpuMergeSort(float *data, unsigned size)
{
    if(size < CoreInBlock*BlockNum*4){
        cpuMergeSort(data,size);
        return;
    }
    float *gpuData;
    CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuData, sizeof(float)*size));
    CUDA_CHECK_RETURN(cudaMemcpy(gpuData, data, sizeof(float)*size, cudaMemcpyHostToDevice));
    float *tmp;
    CUDA_CHECK_RETURN(cudaMalloc((void **)&tmp, sizeof(float)*size));
    int arraySizeInBlock = CoreInBlock*BlockNum;
    gpuKernelMergeSort<<<BlockNum,CoreInBlock>>>(gpuData, tmp, size, arraySizeInBlock);
    // Final single-thread pass: merge the pre-sorted sections (runs of
    // length arraySizeInBlock) into one sorted array.
    gpuKernelMergeSort<<<1,1>>>(gpuData, tmp, size, size, arraySizeInBlock);
    CUDA_CHECK_RETURN(cudaMemcpy(data, gpuData, sizeof(float)*size, cudaMemcpyDeviceToHost));
    //cpuMergeSort(data,size,arraySizeInBlock);
    CUDA_CHECK_RETURN(cudaFree(gpuData));
    CUDA_CHECK_RETURN(cudaFree(tmp));
}

/**
 * Check the return value of the CUDA runtime API call and exit
 * the application if the call has failed.
 */
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
    if (err == cudaSuccess)
        return;
    std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
    exit (1);
}

/**
 * Get core/sm for optimalization purposes
 * (CUDA cores per SM, keyed off the compute capability major/minor version).
 */
int getSPcores(cudaDeviceProp devProp)
{
    int cores = 0;
    switch (devProp.major){
    case 2: // Fermi
        if (devProp.minor == 1) cores = 48;
        else cores = 32;
        break;
    case 3: // Kepler
        cores = 192;
        break;
    case 5: // Maxwell
        cores = 128;
        break;
    case 6: // Pascal
        if (devProp.minor == 1) cores = 128;
        else if (devProp.minor == 0) cores = 64;
        else printf("Unknown device type\n");
        break;
    case 7: // Volta
        if (devProp.minor == 0) cores = 64;
        else printf("Unknown device type\n");
        break;
    default:
        printf("Unknown device type\n");
        break;
    }
    return cores;
}

/**
 * Selects the first CUDA device meeting the minimum runtime/compute version,
 * records its launch parameters in the globals above, and makes it current.
 * Returns false when no usable device (or no per-SM core count) is found.
 */
bool findCudaDevice(){
    int deviceCount, bestDev=-1;
    CUDA_CHECK_RETURN(cudaGetDeviceCount(&deviceCount));
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);
        int runtimeVersion = 0;
        cudaRuntimeGetVersion(&runtimeVersion);
        if (runtimeVersion >= MIN_RUNTIME_VERSION && ((deviceProp.major<<4) + deviceProp.minor) >= MIN_COMPUTE_VERSION)
        {
            if (bestDev == -1)
            {
                bestDev = dev;
                MaxThread = deviceProp.maxThreadsPerBlock;
                BlockNum=deviceProp.multiProcessorCount;
                CoreInBlock=getSPcores(deviceProp);
                if(CoreInBlock==0)return false;
            }
        }
    }
    if(bestDev != -1)cudaSetDevice(bestDev);
    return bestDev != -1;
}
21,243
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <time.h>
#include <sys/time.h>

#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)

// Prints the failing call, file and line, then exits, when a CUDA runtime
// call does not return cudaSuccess.
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
    if (err != cudaSuccess) {
        std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
        std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
        exit(1);
    }
}

/*
 Radix sort
 Sort Integer ranging from 0 - 255
 (make_data below actually generates values up to 4095; the number of radix
 passes is derived from the observed maximum, so this still works.)
*/

#ifndef max
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif

#ifndef min
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif

const int block_size = 1024;
const int DIM = 32;
const int MAX_THREADS_PER_BLOCK = 65535;
const int FIND_MAX_THREADS = 16; // threads per block for all kernels below

/* From stackoverflow */
int rand_lim(int limit) {
/* return a random number between 0 and limit inclusive.
 */
    int divisor = RAND_MAX/(limit+1);
    int retval;
    do {
        retval = rand() / divisor;
    } while (retval > limit);
    return retval;
}

// Fills arr[0..N) with random values in [0, 4095].
void make_data(int* arr, int N) {
    printf("Making data\n");
    for (int i = 0; i < N; i++){
        arr[i] = rand_lim(4095);
    }
    printf("Finish making data\n");
}

// Reports every adjacent out-of-order pair to stderr (verification helper).
void check_sort(int* arr, int N){
    for (int i = 0; i < N-1; i++) {
        if (arr[i] > arr[i+1])
            fprintf(stderr, "arr[%d] > arr[%d] - %d > %d\n", i, i+1, arr[i], arr[i+1]);
    }
    fflush(stderr);
}

// Block-wise maximum: reduces FIND_MAX_THREADS elements of arr in shared
// memory and writes each block's maximum to d_collectMax[blockIdx.x].
// NOTE(review): the final store is executed by every thread of the block
// (same value, so harmless) and there is no __syncthreads() between the last
// reduction step and the store — the loop's trailing barrier covers it.
__global__ void findMax(int* arr, int* d_collectMax, int N) {
    __shared__ int s_inputVals[FIND_MAX_THREADS];
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N){
        s_inputVals[threadIdx.x] = arr[idx];
    }
    else s_inputVals[threadIdx.x] = 0; // pad tail with 0 (values are non-negative)
    __syncthreads();
    int half = FIND_MAX_THREADS / 2;
    while (half != 0) {
        if (threadIdx.x < half) {
            s_inputVals[threadIdx.x] = max(s_inputVals[threadIdx.x], s_inputVals[threadIdx.x + half]);
        }
        half /= 2;
        __syncthreads();
    }
    d_collectMax[blockIdx.x] = s_inputVals[0];
}

// Predicate pass: d_collectScan[i] = 1 when (d_arr[i] & pos) == compare.
// With compare==0 this marks elements whose current bit is 0; with
// compare==pos it marks elements whose current bit is 1.
__global__ void markArr(int* d_arr, int *d_collectScan, int pos, int N, int compare) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N){
        d_collectScan[idx] = (d_arr[idx] & pos) == compare;
    }
}

// Per-block inclusive scan (Hillis–Steele, ping-ponging between two shared
// buffers). Writes the scanned values to d_collectSumScan and each block's
// total to d_sumBlock[blockIdx.x].
__global__ void scanSB(int *d_collectScan, int *d_collectSumScan, int *d_sumBlock, int N, int numMaxBlock) {
    __shared__ int s_inputVals[FIND_MAX_THREADS];
    __shared__ int s_inputValsTMP[FIND_MAX_THREADS];
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N){
        s_inputVals[threadIdx.x] = d_collectScan[idx];
    }
    else s_inputVals[threadIdx.x] = 0;
    __syncthreads();
    int dist = 1;
    int count = 0; // parity tells which buffer currently holds the live data
    while (dist < FIND_MAX_THREADS) {
        if (count % 2 == 0){
            s_inputValsTMP[threadIdx.x] = s_inputVals[threadIdx.x];
            if (threadIdx.x >= dist) {
                s_inputValsTMP[threadIdx.x] += s_inputVals[threadIdx.x - dist];
            }
        } else {
            s_inputVals[threadIdx.x] = s_inputValsTMP[threadIdx.x];
            if (threadIdx.x >= dist) {
                s_inputVals[threadIdx.x] += s_inputValsTMP[threadIdx.x - dist];
            }
        }
        dist *= 2;
        count++;
        __syncthreads();
    }
    if (count % 2 == 0){
        if(idx < N) d_collectSumScan[idx] = s_inputVals[threadIdx.x];
        d_sumBlock[blockIdx.x] = s_inputVals[FIND_MAX_THREADS - 1];
    } else {
        if(idx < N) d_collectSumScan[idx] = s_inputValsTMP[threadIdx.x];
        d_sumBlock[blockIdx.x] = s_inputValsTMP[FIND_MAX_THREADS - 1];
    }
}

// Single-block inclusive scan over the per-block totals (same ping-pong
// scheme as scanSB); run with one block of FIND_MAX_THREADS threads.
__global__ void scanBlockSum(int *d_sumBlock, int numMaxBlock) {
    __shared__ int s_sumBlock[FIND_MAX_THREADS];
    __shared__ int s_sumBlockTMP[FIND_MAX_THREADS];
    int idx = threadIdx.x;
    if(idx >= numMaxBlock) return;
    // NOTE(review): threads with idx >= numMaxBlock exit before the barriers
    // below; __syncthreads() in divergent exit paths is formally unsafe,
    // though it happens to work when the exiting threads do no shared reads.
    s_sumBlock[idx] = d_sumBlock[idx];
    __syncthreads();
    int dist = 1;
    int count = 0;
    while (dist < numMaxBlock) {
        if(count % 2 == 0){
            s_sumBlockTMP[idx] = s_sumBlock[idx];
            if (idx >= dist) {
                s_sumBlockTMP[idx] += s_sumBlock[idx - dist];
            }
        } else {
            s_sumBlock[idx] = s_sumBlockTMP[idx];
            if (idx >= dist) {
                s_sumBlock[idx] += s_sumBlockTMP[idx - dist];
            }
        }
        dist *= 2;
        count++;
        __syncthreads();
    }
    if (count % 2 == 0){
        if(idx < numMaxBlock) d_sumBlock[idx] = s_sumBlock[idx];
    } else {
        if(idx < numMaxBlock) d_sumBlock[idx] = s_sumBlockTMP[idx];
    }
}

// Scatter pass: each marked element (d_collectScan[idx] != 0) is written to
// its final position d_collectSumScan[idx] + offset - 1 in d_interVals.
// NOTE(review): d_collectScan[idx] is read BEFORE the idx >= N bounds check,
// so the last block reads past the end of d_collectScan — reorder the
// condition to `idx >= N || d_collectScan[idx]==0` to be safe.
__global__ void mergeScan(int* arr, int* d_collectScan, int* d_collectSumScan, int* d_interVals, int offset, int N) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (d_collectScan[idx]==0 || idx >= N) return;
    d_interVals[d_collectSumScan[idx] + offset - 1] = arr[idx];
}

// Simple bounded device-to-device element copy.
__global__ void copyData(int* d_dst, int* d_src, int N) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N) return;
    d_dst[idx] = d_src[idx];
}

// Prints a host array in "[a, b, ..., z]" form (debug helper).
void print_array(int* arr, int N){
    for(int i = 0; i < N; i++) {
        if (i == 0){
            printf("[%d, ", arr[i]);
        } else if (i == N -1 ) {
            printf("%d]\n", arr[i]);
        } else {
            printf("%d, ", arr[i]);
        }
    }
}

// Copies a device array to pinned host memory and prints it (debug helper).
// NOTE(review): memory from cudaMallocHost should be released with
// cudaFreeHost, not cudaFree — confirm and fix when touching this code.
void print_darray(int* d_arr, int N){
    int *arr;
    cudaMallocHost(&arr, N * sizeof(int));
    checkCudaErrors(cudaMemcpy(arr, d_arr, sizeof(int) * N, cudaMemcpyDeviceToHost));
    print_array(arr, N);
    cudaFree(arr);
}

// Elapsed microseconds between two gettimeofday() samples.
long time_diff_us(struct timeval st, struct timeval et) {
    return (et.tv_sec-st.tv_sec)*1000000+(et.tv_usec-st.tv_usec);
}

// Adds the scanned per-block totals back into each element so the per-block
// scans become one global scan (block 0 needs no adjustment).
__global__ void mergeScanToIndex( int* d_toCollect, int* d_sumBlock, int N ) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N) return;
    if (blockIdx.x > 0)
        d_toCollect[idx] = d_sumBlock[blockIdx.x-1] + d_toCollect[idx];
}

// Full inclusive scan of d_toScan into d_toCollect: per-block scans via
// scanSB, then the block totals are scanned (recursively when they exceed
// one block) and folded back in with mergeScanToIndex. storage[i] holds the
// scratch buffer for recursion depth i.
void recursive_scan( int* d_toScan, int* d_toCollect, int** storage, int i, int N ) {
    int numBlockSize = (N + FIND_MAX_THREADS - 1) / FIND_MAX_THREADS;
    scanSB<<<numBlockSize, FIND_MAX_THREADS>>>( d_toScan, d_toCollect, storage[i], N, numBlockSize );
    cudaDeviceSynchronize();
    if (numBlockSize > FIND_MAX_THREADS) {
        recursive_scan( storage[i], storage[i], storage, i+1, numBlockSize );
        mergeScanToIndex<<<numBlockSize, FIND_MAX_THREADS>>>(d_toCollect, storage[i], N);
        cudaDeviceSynchronize();
    } else {
        scanBlockSum<<<1, FIND_MAX_THREADS>>>(storage[i], numBlockSize);
        cudaDeviceSynchronize();
        mergeScanToIndex<<<numBlockSize, FIND_MAX_THREADS>>>(d_toCollect, storage[i], N);
        cudaDeviceSynchronize();
    }
}

// One compaction pass of the radix sort: mark elements whose current bit
// matches `compare`, scan the marks to get destination indices, then scatter
// the matching elements into d_interVals starting at `offset`.
// (Commented-out single-level scan experiments from the original removed.)
void scanAndMerge(int* d_arr, int* d_collectScan, int* d_collectSumScan, int* d_sumBlock, int* d_interVals, int** storage, int MSB, int N, int compare, int numMaxBlock, int offset) {
    markArr<<<numMaxBlock, FIND_MAX_THREADS>>>(d_arr, d_collectScan, MSB, N, compare) ;
    cudaDeviceSynchronize();
    recursive_scan( d_collectScan, d_collectSumScan, storage, 0, N );
    mergeScan<<<numMaxBlock, FIND_MAX_THREADS>>>(d_arr, d_collectScan, d_collectSumScan, d_interVals, offset, N);
    cudaDeviceSynchronize();
}

// Driver: generates N random ints, finds the maximum to bound the number of
// bit passes, then radix sorts by scattering the 0-bit elements followed by
// the 1-bit elements each pass, ping-ponging between d_arr and d_interVals.
int main(int argc, char *argv[]) {
    struct timeval st;
    struct timeval et;
    srand (time(0));
    int N = 1000000;
    if (argc >= 2) {
        N = atol(argv[1]);
    }
    int numMaxBlock = (N + FIND_MAX_THREADS - 1) / FIND_MAX_THREADS;
    int *d_arr, *arr;
    checkCudaErrors(cudaMallocHost(&arr, sizeof(int) * N));
    make_data(arr, N);
    checkCudaErrors(cudaMalloc(&d_arr, sizeof(int) * N));
    checkCudaErrors(cudaMemcpy(d_arr, arr, sizeof(int) * N, cudaMemcpyHostToDevice));
    printf("Prepared data\n");
    int *d_collectSumScan, *d_interVals, *d_sumBlock;
    int *d_collectScan;
    checkCudaErrors(cudaMalloc(&d_collectSumScan, sizeof(int) * N));
    checkCudaErrors(cudaMalloc(&d_collectScan, sizeof(int) * N));
    checkCudaErrors(cudaMalloc(&d_interVals, sizeof(int) * N));
    checkCudaErrors(cudaMalloc(&d_sumBlock, sizeof(int) * (numMaxBlock+1)));
    int* d_collectMax, *d_arbitary;
    cudaMalloc(&d_collectMax, sizeof(int) * numMaxBlock);
    cudaMalloc(&d_arbitary, sizeof(int) * numMaxBlock);
    // Count recursion depths needed by recursive_scan and pre-allocate one
    // scratch buffer per depth.
    int num_storage = 0;
    int current_N = (N + FIND_MAX_THREADS - 1) / FIND_MAX_THREADS;
    while(current_N > FIND_MAX_THREADS){
        num_storage++;
        current_N = (current_N + FIND_MAX_THREADS - 1) / FIND_MAX_THREADS;
    }
    int* storage[num_storage];
    // NOTE(review): this loop runs to num_storage+1 and writes
    // storage[num_storage], one past the end of the VLA declared above —
    // suspected off-by-one; either size the array num_storage+1 or loop to
    // num_storage.
    current_N = N;
    for (int i = 0; i < num_storage+1; i++) {
        current_N = (current_N + FIND_MAX_THREADS - 1) / FIND_MAX_THREADS;
        checkCudaErrors(cudaMalloc(&storage[i], sizeof(int) * (current_N + 1)));
    }
    /* Search for Maximum */
    gettimeofday(&st, NULL);
    int MAX = 0;
    // NOTE(review): `arr` is the pinned HOST buffer, not d_arr — this relies
    // on zero-copy access to cudaMallocHost memory, which is not guaranteed
    // without cudaHostAllocMapped; probably d_arr was intended.
    findMax <<<numMaxBlock,FIND_MAX_THREADS>>>(arr, d_collectMax, N);
    cudaDeviceSynchronize();
    // Repeatedly reduce the per-block maxima until they fit in one block.
    int num_groups = numMaxBlock;
    while (num_groups > FIND_MAX_THREADS) {
        num_groups = (num_groups + FIND_MAX_THREADS - 1) / FIND_MAX_THREADS;
        findMax<<<num_groups, FIND_MAX_THREADS>>>(d_collectMax, d_arbitary, num_groups);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaMemcpy(d_collectMax, d_arbitary, sizeof(int) * num_groups, cudaMemcpyDeviceToDevice));
    }
    findMax<<<1, num_groups>>>(d_collectMax, d_collectMax, numMaxBlock);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaMemcpy(&MAX, &d_collectMax[0], sizeof(int), cudaMemcpyDeviceToHost));
    int step = (int)log2(MAX) + 1; // number of significant bits to process
    printf("max = %d\n", MAX);
    printf("Max found\n");
    printf("num Max block = %d\n", numMaxBlock);
    /* Loop through each bit digit */
    int MSB = 1;
    int i = 0;
    int offset;
    for (i = 0; i < step; i++) {
        if (i % 2 == 0){
            // Even pass: source d_arr, destination d_interVals.
            // First scatter the 0-bit elements, then the 1-bit elements
            // after them (offset = count of 0-bit elements).
            scanAndMerge(d_arr, d_collectScan, d_collectSumScan, d_sumBlock, d_interVals, storage, MSB, N, 0, numMaxBlock, 0);
            checkCudaErrors(cudaMemcpy(&offset, &d_collectSumScan[N-1], sizeof(int), cudaMemcpyDeviceToHost));
            scanAndMerge(d_arr, d_collectScan, d_collectSumScan, d_sumBlock, d_interVals, storage, MSB, N, MSB, numMaxBlock, offset);
        } else {
            // Odd pass: roles swapped — source d_interVals, destination d_arr.
            scanAndMerge(d_interVals, d_collectScan, d_collectSumScan, d_sumBlock, d_arr, storage, MSB, N, 0, numMaxBlock, 0);
            checkCudaErrors(cudaMemcpy(&offset, &d_collectSumScan[N-1], sizeof(int), cudaMemcpyDeviceToHost));
            scanAndMerge(d_interVals, d_collectScan, d_collectSumScan, d_sumBlock, d_arr, storage, MSB, N, MSB, numMaxBlock, offset);
        }
        MSB *= 2;
    }
    // After `step` passes the sorted data is in d_interVals when step is odd,
    // in d_arr when even.
    if (i % 2!=0){
        checkCudaErrors(cudaMemcpy(arr, d_interVals, sizeof(int) * N, cudaMemcpyDeviceToHost));
    } else {
        checkCudaErrors(cudaMemcpy(arr, d_arr, sizeof(int) * N, cudaMemcpyDeviceToHost));
    }
    gettimeofday(&et, NULL); /* get start time */
    long us = time_diff_us(st, et);
    printf("sorting %d data took %ld us\n",N, us);
    check_sort(arr, N);
    checkCudaErrors(cudaFree(d_collectSumScan));
    checkCudaErrors(cudaFree(d_collectScan));
    checkCudaErrors(cudaFree(d_interVals));
    checkCudaErrors(cudaFree(d_sumBlock));
    checkCudaErrors(cudaFree(d_arr));
    return 0;
}
21,244
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<math.h>
#include<time.h>
#include<cuda.h>
#include<cufft.h>
#include<cuda_runtime.h>

// Reads an nt x nx float panel from disk, transposes it to trace-major
// order, runs a batched real-to-complex FFT over the first BATCH traces with
// cuFFT, and writes one input trace, the raw FFT output, and the amplitude
// spectrum of one trace to disk.
int main()
{
    int nx,nt,i,ix,it;
    int NX,BATCH;
    float **a_input;   // a_input[it][ix]: time-major input panel
    float *input;      // input[ix*nt+it]: transposed, trace-major copy
    float *amp;        // amplitude spectrum of one trace
    cufftHandle plan;
    cufftComplex *data;
    time_t t_z,t_f;
    nx=5300;
    nt=12001;
    a_input=(float**)calloc(nt,sizeof(float*));
    for(it=0;it<nt;it++){
        a_input[it]=(float*)calloc(nx,sizeof(float));
    }
    input=(float*)calloc(nx*nt,sizeof(float));
    amp=(float*)calloc(nt/2,sizeof(float));
    FILE *fp;
    // NOTE(review): fopen/fread results are unchecked; a missing input file
    // crashes on the first fread.
    fp=fopen("rec_u_3200.bin","rb");
    for(it=0;it<nt;it++){
        for(ix=0;ix<nx;ix++){
            fread(&a_input[it][ix],sizeof(float),1,fp);
        }
    }
    fclose(fp);
    // Transpose so each trace's nt samples are contiguous for the FFT.
    for(ix=0;ix<nx;ix++){
        for(it=0;it<nt;it++){
            input[ix*nt+it]=a_input[it][ix];
        }
    }
    printf("re_transpose_done !!!\n");
    NX=nt;      // FFT length = one trace
    BATCH=10;   // only the first 10 traces are transformed
    cudaMalloc((void**)&data, sizeof(cufftComplex)*(NX/2+1)*BATCH);
    // In-place R2C layout: NX*BATCH input reals fit inside the
    // (NX/2+1)*BATCH complex buffer allocated above.
    cudaMemcpy(data,input,NX*BATCH*sizeof(float),cudaMemcpyHostToDevice);
    t_z=time(NULL);
    // NOTE(review): cufftPlan1d/cufftExecR2C return statuses are ignored.
    cufftPlan1d(&plan, NX, CUFFT_R2C, BATCH);
    cufftExecR2C(plan, (cufftReal*)data, data);
    cufftDestroy(plan);
    t_f=time(NULL);
    // NOTE(review): t_f-t_z is a time_t difference printed with %f — this is
    // undefined behavior; use difftime(t_f,t_z) (a double) instead.
    printf("\nCalculating time:%f (s) \n\n",t_f-t_z);
    // NOTE(review): suspected bug — this copies nx*nt floats (~254 MB) out of
    // a device buffer holding only (NX/2+1)*BATCH complex values (~480 KB),
    // reading far past the end of the allocation. The later loops only use
    // the first ~2*nt floats; the copy size should match the allocation.
    cudaMemcpy(input,data,nx*nt*sizeof(float),cudaMemcpyDeviceToHost);
    // NOTE(review): filename typo "bofore" preserved — downstream tooling may
    // already depend on it.
    fp=fopen("bofore_cufft.bin","wb");
    for(it=0;it<nt;it++){
        for(ix=0;ix<1;ix++){
            fwrite(&a_input[it][ix],sizeof(float),1,fp);
        }
    }
    fclose(fp);
    fp=fopen("after_cufft.bin","wb");
    for(it=0;it<nt;it++){
        fwrite(&input[it],sizeof(float),1,fp);
    }
    fclose(fp);
    cudaFree(data);
    // Amplitude spectrum from interleaved (re,im) pairs of the second trace
    // (the scan starts at offset nt).
    for(i=0;i<nt/2;i++){
        amp[i]=sqrt(input[nt+2*i]*input[nt+2*i]+input[nt+2*i+1]*input[nt+2*i+1]);
    }
    fp=fopen("amp.bin","wb");
    fwrite(amp,sizeof(float),nt/2,fp);
    fclose(fp);
    return 0;
}
21,245
#include "includes.h"

/**
 * Nathan Dunn
 * CS-4370-90 Par. Prog. Many-Core GPUs
 * Professor Liu
 * 10-24-19
 * Tiled Matrix Multiplication
 */

#define N 8 // size of the matrices to be multiplied
#define TILE_WIDTH 4 // size of the tiles

/**
 * Computes d_P = d_M * d_N on the GPU using shared-memory tiling.
 * d_M   - first input matrix (Width x Width, row-major)
 * d_N   - second input matrix (Width x Width, row-major)
 * d_P   - output product matrix (Width x Width, row-major)
 * Width - matrix dimension; assumed to be a multiple of TILE_WIDTH, with
 *         the launch using TILE_WIDTH x TILE_WIDTH blocks covering d_P.
 */
__global__ void MatrixMulKernel(float* d_M, float* d_N, float* d_P, int Width)
{
	// Per-block staging tiles for one TILE_WIDTH-wide strip of each input.
	__shared__ float tileM[TILE_WIDTH][TILE_WIDTH];
	__shared__ float tileN[TILE_WIDTH][TILE_WIDTH];

	// Output element this thread is responsible for.
	const int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
	const int col = blockIdx.x * TILE_WIDTH + threadIdx.x;

	// Accumulate in double for precision, as the original did.
	double acc = 0;

	// March the tile pair across the shared dimension.
	const int numTiles = Width / TILE_WIDTH;
	for (int t = 0; t < numTiles; ++t) {
		// Each thread loads one element of each tile.
		tileM[threadIdx.y][threadIdx.x] = d_M[row * Width + (t * TILE_WIDTH + threadIdx.x)];
		tileN[threadIdx.y][threadIdx.x] = d_N[(t * TILE_WIDTH + threadIdx.y) * Width + col];
		__syncthreads(); // tiles fully loaded before use

		for (int k = 0; k < TILE_WIDTH; ++k) {
			acc += tileM[threadIdx.y][k] * tileN[k][threadIdx.x];
		}
		__syncthreads(); // all reads done before the tiles are overwritten
	}

	d_P[row * Width + col] = acc;
}
21,246
#include "includes.h"

// One Game-of-Life generation: reads the current grid `val` (n x n, one int
// cell state per element), computes each interior cell's next state from its
// 8 neighbours via a shared-memory tile with a 1-cell halo, and writes the
// result to `aux`. Border cells (row/col 0 and n-1) are never written.
// Assumes blockDim.x == blockDim.y == BSIZE (BSIZE comes from includes.h —
// TODO confirm) and that the grid of blocks exactly tiles the n x n field.
__global__ void Evolve( int *val, int *aux, int n )
{
    int up, upright, right, rightdown, down, downleft, left, leftup;
    int sum = 0, estado;
    // tx/ty: this thread's position inside the haloed shared tile.
    const int tx = threadIdx.x + 1, ty = threadIdx.y + 1;
    // i/j: this thread's global row/column in the field.
    const int i = blockIdx.y * blockDim.y + threadIdx.y;
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    const int b2 = BSIZE + 2; // tile edge including the halo
    // NOTE(review): cell states are ints but the tile is declared float,
    // forcing int->float->int conversions; an int tile would be cheaper.
    __shared__ float sdata[ b2 ][ b2 ];
    // Every thread loads its own cell into the tile interior.
    sdata[ ty ][ tx ] = val[ i * n + j ];
    // Thread (1,1) loads the four halo corners.
    // NOTE(review): for blocks on the field border these corner reads (and
    // the edge reads below at tx==BSIZE / ty==BSIZE) index val with negative
    // or >= n*n offsets — out-of-bounds unless the caller guarantees a
    // surrounding layout that makes them safe. Confirm.
    if( ( tx == 1 ) && ( ty == 1 ) )
    {
        int stx = blockIdx.x * blockDim.x;
        int sty = blockIdx.y * blockDim.y;
        sdata[ 0 ][ 0 ] = val[ ( sty - 1 ) * n + stx - 1 ];
        sdata[ 0 ][ b2 - 1 ] = val[ ( sty - 1 ) * n + stx + BSIZE ];
        sdata[ b2 - 1 ][ 0 ] = val[ ( sty + BSIZE ) * n + stx - 1 ];
        sdata[ b2 - 1 ][ b2 - 1 ] = val[ ( sty + BSIZE ) * n + stx + BSIZE ];
    }
    // Edge threads load the four halo edges (guarded against the field border).
    if( ( j > 0 ) && ( tx == 1 ) )
    {
        sdata[ ty ][ 0 ] = val[ i * n + j - 1 ];
    }
    if( ( j < ( n - 1 ) ) && ( tx == BSIZE ) )
    {
        sdata[ ty ][ b2 - 1 ] = val[ i * n + j + 1 ];
    }
    if( ( i > 0 ) && ( ty == 1 ) )
    {
        sdata[ 0 ][ tx ] = val[ ( i - 1 ) * n + j ];
    }
    if( ( i < ( n - 1 ) ) && ( ty == BSIZE ) )
    {
        sdata[ b2 - 1 ][ tx ] = val[ ( i + 1 ) * n + j ];
    }
    __syncthreads( );
    // Interior cells only: apply the standard Life rules
    // (exactly 3 neighbours -> alive; alive with <2 or >3 -> dead).
    if( ( i > 0 ) && ( i < ( n - 1 ) ) && ( j > 0 ) && ( j < ( n - 1 ) ) )
    {
        estado = sdata[ ty ][ tx ];
        up = sdata[ ty - 1 ][ tx ];
        upright = sdata[ ty - 1 ][ tx + 1 ];
        right = sdata[ ty ][ tx + 1 ];
        rightdown = sdata[ ty + 1 ][ tx + 1 ];
        down = sdata[ ty + 1 ][ tx ];
        downleft = sdata[ ty + 1 ][ tx - 1 ];
        left = sdata[ ty ][ tx - 1 ];
        leftup = sdata[ ty - 1 ][ tx - 1 ];
        sum = up + upright + right + rightdown + down + downleft + left + leftup;
        if( sum == 3 )
        {
            estado = 1;
        }
        else if( ( estado == 1 ) && ( ( sum < 2 ) || ( sum > 3 ) ) )
        {
            estado = 0;
        }
        aux[ i * n + j ] = estado;
    }
}
21,247
#include "kernel_shared.cuh"
#include "kernel_compute.cuh"
#include "cuda_globals.cuh"
#include "globals.cuh"
#include "const.cuh"
#include <stdio.h>
#include <chrono>

// 1 when the cell at flat offset O in `field` is an electron head.
#define FIELD_AT_IS_HEAD(O) (field[O] == CELL_ELECTRON_HEAD)

// One Wireworld step for the cell this thread owns (offset supplied by the
// project helper getFieldOffsetAt): head -> tail, tail -> conductor, and a
// conductor becomes a head when exactly 1 or 2 of its 8 neighbours are heads.
// Empty cells fall through the switch and are left untouched in outfield.
// NOTE(review): the neighbour reads span offset-(width+1) .. offset+(width+1)
// with no bounds checks — this assumes the field has a border of non-conductor
// cells (or getFieldOffsetAt never maps threads onto the border). Confirm.
__global__ void computeCell(const int width, const char* field, char* outfield)
{
    const int offset = getFieldOffsetAt(0, 0, width);

    switch (field[offset])
    {
    case CELL_ELECTRON_HEAD:
        outfield[offset] = CELL_ELECTRON_TAIL;
        break;
    case CELL_ELECTRON_TAIL:
        outfield[offset] = CELL_CONDUCTOR;
        break;
    case CELL_CONDUCTOR:
    {
        // Count electron heads among the 8 neighbours, row by row:
        // cubeOffset walks the top, middle, and bottom rows of the 3x3 box.
        int cubeOffset = offset - (1 + width);
        char neighbourElectronHeads =
            FIELD_AT_IS_HEAD(cubeOffset) +
            FIELD_AT_IS_HEAD(cubeOffset + 1) +
            FIELD_AT_IS_HEAD(cubeOffset + 2);
        cubeOffset += width;
        neighbourElectronHeads +=
            FIELD_AT_IS_HEAD(cubeOffset) +
            FIELD_AT_IS_HEAD(cubeOffset + 2); // skip the centre cell itself
        cubeOffset += width;
        neighbourElectronHeads +=
            FIELD_AT_IS_HEAD(cubeOffset) +
            FIELD_AT_IS_HEAD(cubeOffset + 1) +
            FIELD_AT_IS_HEAD(cubeOffset + 2);

        outfield[offset] = (neighbourElectronHeads == 1 || neighbourElectronHeads == 2)
            ? CELL_ELECTRON_HEAD
            : CELL_CONDUCTOR;
        break;
    }
    }
}

// Runs `iterations` simulation steps. The global d_field/d_outfield buffers
// are swapped BEFORE each launch, so after return d_outfield holds the most
// recent generation. Launch shape comes from the project globals
// numBlocks/threadsPerBlock. Blocks until all steps finish.
void runComputeCell(int iterations)
{
    for (int i = 0; i < iterations; i++)
    {
        std::swap(d_outfield, d_field);
        computeCell<<<numBlocks, threadsPerBlock>>>(width, d_field, d_outfield);
    }
    cudaDeviceSynchronize();
}

// Adaptive batch size for runComputeCellFor, carried across calls.
static int timedIterations = 100;

// Runs a warm-up step, times a batch of `timedIterations` steps, and rescales
// the batch so the next call takes roughly msTarget milliseconds.
void runComputeCellFor(float msTarget)
{
    runComputeCell(1); // warm-up; excluded from the measurement
    auto t1 = std::chrono::high_resolution_clock::now();
    runComputeCell(timedIterations);
    auto t2 = std::chrono::high_resolution_clock::now();

    float msActual = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f;
    // Linear rescale toward the target; clamp so we always make progress.
    timedIterations = (int)(timedIterations * (msTarget / msActual));
    if (timedIterations < 1)
    {
        timedIterations = 1;
    }
}
21,248
/******************************************************************************
*cr
*cr            (C) Copyright 2010 The Board of Trustees of the
*cr                        University of Illinois
*cr                         All Rights Reserved
*cr
******************************************************************************/

#define BLOCK_SIZE 512

// Partial sum reduction: each block loads 2*blockDim.x elements of `in`
// (zero-padding past `size`), reduces them in shared memory via a binary
// tree, and writes its partial sum to out[blockIdx.x]. The host finishes by
// summing the per-block results (or launching again on `out`).
// Assumes blockDim.x <= BLOCK_SIZE and blockDim.x is a power of two.
__global__ void reduction(float *out, float *in, unsigned size)
{
    // Was hard-coded as sdata[512]; use the file's BLOCK_SIZE constant so the
    // two stay in sync.
    __shared__ float sdata[BLOCK_SIZE];

    int tx = threadIdx.x;
    int i = blockIdx.x * 2 * blockDim.x + tx;

    // Each thread loads two elements a block-width apart, guarding the tail.
    float x = (i < size) ? in[i] : 0.0f;
    int secondLoad = i + blockDim.x;
    float y = (secondLoad < size) ? in[secondLoad] : 0.0f;
    sdata[tx] = x + y;
    __syncthreads();

    // Tree reduction; the barrier is outside the if so every thread reaches it.
    for (int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if (tx < offset)
            sdata[tx] += sdata[tx + offset];
        __syncthreads();
    }

    if (tx == 0)
    {
        out[blockIdx.x] = sdata[0];
    }
}
21,249
#include <cuda.h>
#include <cmath>
#include <iostream>
#include <random>
#include <ctime>

/**
 * Returns a random double uniformly distributed in [fMin, fMax].
 */
double fRand(double fMin, double fMax)
{
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<> dis(fMin, fMax);
    double a = dis(gen);
    return a;
}

/**
 * A circular obstacle of radius @r at (@_x, @_y) with velocity <@v_x, @v_y>.
 * Default-constructed at a random position in [-100,100]^2 with random
 * non-negative velocity components in [0,5].
 */
struct Obstacle {
public:
    double _x, _y, v_x, v_y, r;
    Obstacle() {
        _x = fRand(-100.0, 100.0);
        _y = fRand(-100.0, 100.0);
        v_x = fRand(0.0, 5.0);
        v_y = fRand(0.0, 5.0);
        r = 1.0;
    }
};

/**
 * GPU kernel: for each of @n obstacles, computes the elapsed time when a
 * collision with the scooter at the origin starts (@t_s) and ends (@t_e),
 * storing them at list[2j] and list[2j+1]. Grid-stride loop, so any launch
 * configuration covers all n obstacles.
 * NOTE: the +/-2.0 band is the combined radii (obstacle r=1 plus scooter
 * radius 1 — presumably; confirm against the model). A zero-velocity
 * obstacle yields infinite times.
 */
__global__ void intersectTime_g(int n, Obstacle points[], double list[])
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int j = index; j < n; j += stride) {
        Obstacle a = points[j];
        // distance between obstacle and scooter (at the origin)
        double d = sqrt(a._x * a._x + a._y * a._y);
        // distance travelled when collision starts and ends
        double d_s = d - 2.0;
        double d_e = d + 2.0;
        // obstacle speed
        double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
        double t_s = d_s / v;
        double t_e = d_e / v;
        list[2 * j] = t_s;
        list[2 * j + 1] = t_e;
    }
}

/**
 * CPU reference implementation of intersectTime_g (identical math).
 */
void intersectTime_c(int n, Obstacle points[], double list[])
{
    for (int j = 0; j < n; j++) {
        Obstacle a = points[j];
        double d = sqrt(a._x * a._x + a._y * a._y);
        double d_s = d - 2.0;
        double d_e = d + 2.0;
        double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
        double t_s = d_s / v;
        double t_e = d_e / v;
        list[2 * j] = t_s;
        list[2 * j + 1] = t_e;
    }
}

/**
 * Benchmark driver: for problem sizes of n*10 obstacles (n = 0..99), runs
 * 1000 trials of the GPU and CPU implementations and prints the accumulated
 * times and their ratio. (The n = 0 row divides 0/0 and prints nan, as in
 * the original.)
 */
int main()
{
    for (int n = 0; n < 100; n++) {
        double total_time_c = 0.0;
        double total_time_g = 0.0;

        Obstacle* points_g;
        cudaMallocManaged(&points_g, n * 10 * sizeof(Obstacle));
        double* list_g;
        cudaMallocManaged(&list_g, n * 10 * 2 * sizeof(double));

        for (int s = 0; s < 1000; s++) {
            // Same set of obstacles for both CPU and GPU.
            Obstacle * points = new Obstacle[n * 10];
            for (int i = 0; i < n * 10; i++) {
                points[i] = Obstacle();
            }

            // GPU run (the device->host copy of the unmodified inputs is
            // kept so the measured time matches the original benchmark).
            cudaMemcpy(points_g, points, n * 10 * sizeof(Obstacle), cudaMemcpyHostToDevice);
            int blockSize = 256;
            int numBlocks = (n * 10 + blockSize - 1) / blockSize;
            clock_t time = clock();
            intersectTime_g<<<numBlocks, blockSize>>>(n * 10, points_g, list_g);
            cudaMemcpy(points, points_g, n * 10 * sizeof(Obstacle), cudaMemcpyDeviceToHost);
            cudaDeviceSynchronize();
            time = clock() - time;
            double elapsed_g = time / (double) CLOCKS_PER_SEC;
            total_time_g += elapsed_g;

            // CPU run.
            double* list_c = new double[n * 10 * 2];
            clock_t e = clock();
            intersectTime_c(n * 10, points, list_c);
            e = clock() - e;
            double elapsed_c = e / (double) CLOCKS_PER_SEC;
            total_time_c += elapsed_c;

            // Release per-trial buffers; the original leaked both every
            // iteration (100 * 1000 allocations).
            delete[] points;
            delete[] list_c;
        }

        printf("%d GPU: %.8lf s ", (n * 10), total_time_g);
        printf("CPU: %.8lf s ", total_time_c);
        printf("%.2lf \n", total_time_c / total_time_g);

        cudaFree(points_g);
        cudaFree(list_g);
    }
}
21,250
#include <stdio.h>
#include <cuda.h>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;

// Abort with file/line context when a CUDA runtime call fails.
#define gpuErrCheck( err ) (gpuAssert( err, __FILE__, __LINE__ ))
static void gpuAssert(cudaError_t err, const char *file, int line)
{
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
#ifdef _WIN32
        system("pause");
#endif
        exit(EXIT_FAILURE);
    }
}

int main()
{
    cout << "Hello World\n" << endl;
    int n = 10000;
    int *a;
    int *dev_a;
    if (NULL == (a = (int*)malloc(n * sizeof(int))))
        exit(20);
    // fix: the buffer was copied to the device uninitialized; give it
    // defined contents first
    for (int i = 0; i < n; i++)
        a[i] = i;
    int c = 32 + 23;
    c += 1;
    printf("%d\n", c);
    gpuErrCheck(cudaMalloc((void**)&dev_a, n * sizeof(int)));
    gpuErrCheck(cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice));
    // fix: both the device and host allocations were leaked
    gpuErrCheck(cudaFree(dev_a));
    free(a);
    return 0;
}
21,251
#include <cstdio>
#include <ctime>
#include <cmath>
#include <cstdint>
/* we need these includes for CUDA's random number stuff */
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <random>
#include <chrono>
#include "cuda_runtime_api.h"

#define N 8
#define MAX 20
#define PERCENTAGEINTERVAL 5

/* Initialize one curand state per block (launched with 1 thread/block).
   The seed is shared; the sequence number (blockIdx.x) makes the
   streams distinct. blockIdx.y is always 0 for this 1-D launch. */
__global__ void init(unsigned int seed, curandState_t* states) {
  curand_init(seed,
              blockIdx.x,
              blockIdx.y,
              &states[blockIdx.x]);
}

/* Draw one 32-bit random number per block and split its four bytes
   into four dice rolls in 1..MAX. */
__global__ void randoms(curandState_t* states, uint8_t* numbers) {
  uint32_t randInt = curand(&states[blockIdx.x]);
  numbers[4 * blockIdx.x]     = ((randInt & 0xFF000000UL) >> 24) % MAX + 1;
  numbers[4 * blockIdx.x + 1] = ((randInt & 0x00FF0000UL) >> 16) % MAX + 1;
  numbers[4 * blockIdx.x + 2] = ((randInt & 0x0000FF00UL) >> 8 ) % MAX + 1;
  numbers[4 * blockIdx.x + 3] = ((randInt & 0x000000FFUL)      ) % MAX + 1;
}

/* Atomically count every roll that is >= numberstopass[0]. */
__global__ void passcheck(unsigned long long int* passcounter, int8_t* numberstopass, const uint8_t* numbers) {
  if (numbers[blockIdx.x * blockDim.x + threadIdx.x] >= numberstopass[0]) {
    atomicAdd(passcounter, 1ULL);
  }
}

/* Print progress (percent done + observed roll rate).
   start_time is a raw system_clock tick count, i.e.
   time_since_epoch().count() of the start time point. */
void printLoadingBar(long long int rolled, long long int counterStop, double start_time)
{
  printf("Rolled: %lld%% ", (long long int)ceil(rolled * 100 / counterStop));
  auto end = std::chrono::system_clock::now().time_since_epoch().count();
  double diffTicks = end - start_time;
  // fix: convert ticks to seconds via the clock's declared period
  // instead of hard-coding 100 ns ticks (only true on Windows), and
  // guard the division for the first call where the interval can be 0
  double seconds = diffTicks *
                   (double)std::chrono::system_clock::period::num /
                   (double)std::chrono::system_clock::period::den;
  if (seconds > 0.0)
    printf(": %lld rolls per second \n", (long long int)ceil((double)rolled / seconds));
  else
    printf("\n");
}

// you must first call the cudaGetDeviceProperties() function, then pass
// the devProp structure returned to this function:
int getSPcores(cudaDeviceProp devProp)
{
  int cores = 0;
  int mp = devProp.multiProcessorCount;
  switch (devProp.major) {
  case 2: // Fermi
    if (devProp.minor == 1) cores = mp * 48;
    else cores = mp * 32;
    break;
  case 3: // Kepler
    cores = mp * 192;
    break;
  case 5: // Maxwell
    cores = mp * 128;
    break;
  case 6: // Pascal
    if ((devProp.minor == 1) || (devProp.minor == 2)) cores = mp * 128;
    else if (devProp.minor == 0) cores = mp * 64;
    else printf("Unknown device type\n");
    break;
  case 7: // Volta and Turing
    if ((devProp.minor == 0) || (devProp.minor == 5)) cores = mp * 64;
    else printf("Unknown device type\n");
    break;
  case 8: // Ampere
    if (devProp.minor == 0) cores = mp * 64;
    else if (devProp.minor == 6) cores = mp * 128;
    else printf("Unknown device type\n");
    break;
  default:
    printf("Unknown device type\n");
    break;
  }
  return cores;
}

int main()
{
  cudaDeviceProp cudaDeviceProp;
  cudaGetDeviceProperties(&cudaDeviceProp, 0);
  int nrCores = getSPcores(cudaDeviceProp);
  std::cout << "ShaderCores: " << nrCores << "\n";

  // After how many rolls should you stop
  long long int counter = 0;
  long long int counterstop = (long long int)(INT32_MAX) * 2;

  // Cuda performance metrics
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  /* device buffer of rolls: 4 rolls per (nrCores * N) generator */
  uint8_t* gpu_nums;
  cudaMalloc((void **) &gpu_nums, nrCores * N * 4 * sizeof(uint8_t));

  /* pass counter, host and device */
  unsigned long long int cpu_pass_counter[1];
  cpu_pass_counter[0] = 0;
  unsigned long long int* gpu_pass_counter;
  cudaMalloc((void **) &gpu_pass_counter, 1 * sizeof(unsigned long long int));
  cudaMemcpy(gpu_pass_counter, cpu_pass_counter, 1 * sizeof(unsigned long long int), cudaMemcpyHostToDevice);

  /* threshold a roll must meet to count as a pass */
  int8_t cpu_num_to_pass[1];
  cpu_num_to_pass[0] = 11;
  int8_t* gpu_num_to_roll;
  cudaMalloc((void **) &gpu_num_to_roll, 1 * sizeof(int8_t));
  cudaMemcpy(gpu_num_to_roll, cpu_num_to_pass, 1 * sizeof(int8_t), cudaMemcpyHostToDevice);
  cudaDeviceSynchronize();

  /* random states, one per generator block */
  curandState_t* states;
  cudaMalloc((void **) &states, nrCores * N * sizeof(curandState_t));
  init<<<nrCores * N, 1>>>(time(nullptr), states);

  auto start_timer = std::chrono::system_clock::now();
  printLoadingBar(counter, counterstop, start_timer.time_since_epoch().count());
  cudaEventRecord(start);

  long long int rollsPerLaunch = (long long int)nrCores * N * 4;
  // Report roughly every PERCENTAGEINTERVAL percent of the total work.
  // fix: the old expression could evaluate to 0 on large GPUs, which
  // made the `%` below divide by zero
  long long int reportInterval = (counterstop / rollsPerLaunch) * PERCENTAGEINTERVAL / 100;
  if (reportInterval < 1) reportInterval = 1;

  int loopcounter = 0;
  while (counter < counterstop) {
    randoms<<<nrCores * N, 1>>>(states, gpu_nums);
    passcheck<<<nrCores * N, 4>>>(gpu_pass_counter, gpu_num_to_roll, gpu_nums);
    counter += rollsPerLaunch;
    if ((loopcounter % reportInterval) == 0) {
      printLoadingBar(counter, counterstop, start_timer.time_since_epoch().count());
    }
    loopcounter++;
  }
  cudaEventRecord(stop);
  printLoadingBar(counter, counterstop, start_timer.time_since_epoch().count());

  cudaMemcpy(cpu_pass_counter, gpu_pass_counter, 1 * sizeof(unsigned long long int), cudaMemcpyDeviceToHost);

  /* free memory from GPU */
  cudaFree(states);
  cudaFree(gpu_nums);
  cudaFree(gpu_pass_counter);
  cudaFree(gpu_num_to_roll);

  cudaEventSynchronize(stop);
  float milliseconds = 0;
  cudaEventElapsedTime(&milliseconds, start, stop);
  // fix: %llu for the unsigned long long pass counter (was %lld)
  printf("Ran %lld simulations resulting in %llu d%i rolls above %i taking %fs \n",
         counter, cpu_pass_counter[0], MAX, cpu_num_to_pass[0], milliseconds / 1000);
  printf("Averaged: %lld rolls per second", (long long int)(counter / (milliseconds / 1000)));

  do {
    std::cout << '\n' << "Enter any key to continue...";
  } while (std::cin.get() != '\n');
  return 0;
}
21,252
/*
 * Author:
 * Yixin Li, Email: liyixin@mit.edu
 * convert the image from LAB to RGB
 */
// Kernel: in-place CIELAB -> sRGB conversion of an interleaved image
// buffer. img holds 3 doubles per pixel (L, a, b on entry; R, G, B in
// 0..255 on exit). One thread per pixel; launch with any 1-D grid
// covering nPts threads.
__global__ void lab_to_rgb( double * img, const int nPts)
{
    // getting the index of the pixel
    const int t = threadIdx.x + blockIdx.x * blockDim.x;
    if (t>=nPts) return;

    double L = img[3*t];
    double La = img[3*t+1];
    double Lb = img[3*t+2];
    // x!=x is the NaN test: pixels with any NaN channel are left untouched
    if (L!=L || La!=La || Lb!=Lb) return;

    //convert from LAB to XYZ
    double fy = (L+16) / 116;
    double fx = La/500 + fy;
    double fz = fy-Lb/200;

    double x,y,z;
    double xcube = pow(fx,3);
    double ycube = pow(fy,3);
    double zcube = pow(fz,3);
    // piecewise inverse of the Lab f() function; 0.008856 is the
    // (6/29)^3 linearity threshold of the CIELAB definition
    if (ycube>0.008856) y = ycube;
    else y = (fy-16.0/116.0)/7.787;
    if (xcube>0.008856) x = xcube;
    else x = (fx - 16.0/116.0)/7.787;
    if (zcube>0.008856) z = zcube;
    else z = (fz - 16.0/116.0)/7.787;
    // scale by the reference-white tristimulus values
    double X = 0.950456 * x;
    double Y = 1.000 * y;
    double Z = 1.088754 * z;

    //convert from XYZ to rgb (linear-light matrix transform)
    double R = X * 3.2406 + Y * -1.5372 + Z * -0.4986;
    double G = X * -0.9689 + Y * 1.8758 + Z * 0.0415;
    double B = X * 0.0557 + Y * -0.2040 + Z * 1.0570;

    double r,g,b;
    // gamma encoding: linear below 0.0031308, power curve above
    if (R>0.0031308) r = 1.055 * (pow(R,(1.0/2.4))) - 0.055;
    else r = 12.92 * R;
    if (G>0.0031308) g = 1.055 * ( pow(G,(1.0/2.4))) - 0.055;
    else g= 12.92 * G;
    if (B>0.0031308) b = 1.055 * (pow(B, (1.0/2.4))) - 0.055;
    else b = 12.92 * B;

    // NOTE(review): only the upper bound is clamped; out-of-gamut
    // inputs can produce negative channel values here — confirm
    // whether a max(0.0, ...) clamp is intended downstream.
    img[3*t] = min(255.0, r * 255.0);
    img[3*t+1] = min(255.0, g * 255.0);
    img[3*t+2] = min(255.0, b * 255.0);
}
21,253
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <cuda.h>
#include <sys/time.h>

// helper for main()
long readList(long**);

// data[], size, threads, blocks,
void mergesort(long*, long, dim3, dim3);
// A[]. B[], size, width, slices, nThreads
__global__ void gpu_mergesort(long*, long*, long, long, long, dim3*, dim3*);
__device__ void gpu_bottomUpMerge(long*, long*, long, long, long);

// profiling
int tm();

// fix: fully parenthesized so the macro is safe when called with
// compound expressions such as min(start + (width >> 1), size)
#define min(a, b) ((a) < (b) ? (a) : (b))

bool verbose;

int main(int argc, char *argv[]) {

    // first stdin value is the element count (read but not otherwise used)
    int size_all;
    std::cin >> size_all;

    dim3 threadsPerBlock;
    dim3 blocksPerGrid;

    threadsPerBlock.x = 32;
    threadsPerBlock.y = 1;
    threadsPerBlock.z = 1;

    blocksPerGrid.x = 8;
    blocksPerGrid.y = 1;
    blocksPerGrid.z = 1;

    //
    // Parse argv: -x/-y/-z set threadsPerBlock, -X/-Y/-Z blocksPerGrid,
    // -v enables verbose timing output
    //
    tm();
    for (int i = 1; i < argc; i++) {
        if (argv[i][0] == '-' && argv[i][1] && !argv[i][2]) {
            char arg = argv[i][1];
            unsigned int* toSet = 0;
            switch(arg) {
                case 'x': toSet = &threadsPerBlock.x; break;
                case 'y': toSet = &threadsPerBlock.y; break;
                case 'z': toSet = &threadsPerBlock.z; break;
                case 'X': toSet = &blocksPerGrid.x; break;
                case 'Y': toSet = &blocksPerGrid.y; break;
                case 'Z': toSet = &blocksPerGrid.z; break;
                case 'v': verbose = true; break;
                default:
                    std::cout << "unknown argument: " << arg << '\n';
                    return -1;
            }
            if (toSet) {
                // fix: a trailing flag with no value used to read
                // argv[argc] (NULL) and feed it to strtol
                if (++i >= argc) {
                    std::cout << "missing value for argument: " << arg << '\n';
                    return -1;
                }
                *toSet = (unsigned int) strtol(argv[i], 0, 10);
            }
        } else {
            if (argv[i][0] == '?' && !argv[i][1])
                std::cout << "help:\n";
            else
                std::cout << "invalid argument: " << argv[i] << '\n';
            return -1;
        }
    }

    if (verbose) {
        std::cout << "parse argv " << tm() << " microseconds\n";
        std::cout << "\nthreadsPerBlock:"
                  << "\n x: " << threadsPerBlock.x
                  << "\n y: " << threadsPerBlock.y
                  << "\n z: " << threadsPerBlock.z
                  << "\n\nblocksPerGrid:"
                  << "\n x:" << blocksPerGrid.x
                  << "\n y:" << blocksPerGrid.y
                  << "\n z:" << blocksPerGrid.z
                  << "\n\n total threads: "
                  << threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z *
                     blocksPerGrid.x * blocksPerGrid.y * blocksPerGrid.z
                  << "\n\n";
    }

    //
    // Read numbers from stdin
    //
    long* data;
    long size = readList(&data);
    if (!size) return -1;

    if (verbose)
        std::cout << "sorting " << size << " numbers\n\n";

    // NOTE(review): this dumps the *unsorted* input before sorting and
    // looks like leftover debugging; kept to preserve the program's
    // observable output.
    for (int i = 0; i < size; i++) {
        std::cout << data[i] << '\n';
    }

    // merge-sort the data
    mergesort(data, size, threadsPerBlock, blocksPerGrid);
    tm();

    //
    // Print out the list
    //
    for (int i = 0; i < size; i++) {
        std::cout << data[i] << '\n';
    }

    if (verbose) {
        std::cout << "print list to stdout: " << tm() << " microseconds\n";
    }
}

// Host driver: copies the list to the device, runs log2(size) merge
// passes (doubling the sorted-run width each pass, ping-ponging between
// two device buffers), and copies the result back into data.
void mergesort(long* data, long size, dim3 threadsPerBlock, dim3 blocksPerGrid) {

    //
    // Allocate the two device arrays we switch back and forth between
    // during the sort
    //
    long* D_data;
    long* D_swp;
    dim3* D_threads;
    dim3* D_blocks;
    cudaError_t error = cudaSuccess;

    tm();
    std::cout<<"reservando memoria con cudamalloc"<<std::endl;
    error = cudaMalloc((void**) &D_data, size * sizeof(long));
    if(error != cudaSuccess){
        std::cout<<"Error reservando memoria para D_data"<<std::endl;
    }
    std::cout<<"pass 1"<<std::endl;
    error = cudaMalloc((void**) &D_swp, size * sizeof(long));
    if(error != cudaSuccess){
        std::cout<<"Error reservando memoria para D_swp"<<std::endl;
    }
    std::cout<<"pass 2"<<std::endl;
    if (verbose)
        std::cout << "cudaMalloc device lists: " << tm() << " microseconds\n";

    // Copy from our input list into the first array
    cudaMemcpy(D_data, data, size * sizeof(long), cudaMemcpyHostToDevice);
    std::cout<<"copy 1"<<std::endl;
    if (verbose)
        std::cout << "cudaMemcpy list to device: " << tm() << " microseconds\n";

    //
    // Copy the thread / block geometry to the GPU so the kernel can
    // compute its flat thread id
    //
    error = cudaMalloc((void**) &D_threads, sizeof(dim3));
    if(error != cudaSuccess){
        std::cout<<"Error reservando memoria para D_threads"<<std::endl;
    }
    error = cudaMalloc((void**) &D_blocks, sizeof(dim3));
    if(error != cudaSuccess){
        std::cout<<"Error reservando memoria para D_blocks"<<std::endl;
    }
    std::cout<<"pass t and b"<<std::endl;
    if (verbose)
        std::cout << "cudaMalloc device thread data: " << tm() << " microseconds\n";
    cudaMemcpy(D_threads, &threadsPerBlock, sizeof(dim3), cudaMemcpyHostToDevice);
    cudaMemcpy(D_blocks, &blocksPerGrid, sizeof(dim3), cudaMemcpyHostToDevice);
    std::cout<<"copy t and b"<<std::endl;
    if (verbose)
        std::cout << "cudaMemcpy thread data to device: " << tm() << " microseconds\n";

    long* A = D_data;
    long* B = D_swp;

    long nThreads = threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z *
                    blocksPerGrid.x * blocksPerGrid.y * blocksPerGrid.z;

    //
    // Slice up the list and give pieces of it to each thread, letting
    // the pieces grow bigger and bigger until the whole list is sorted
    //
    std::cout<<"antes del ciclo for extraño"<<std::endl;
    for (int width = 2; width < (size << 1); width <<= 1) {
        long slices = size / ((nThreads) * width) + 1;

        if (verbose) {
            std::cout << "mergeSort - width: " << width
                      << ", slices: " << slices
                      << ", nThreads: " << nThreads << '\n';
            tm();
        }

        // Actually call the kernel
        std::cout<< "llamando a a GPU"<<std::endl;
        gpu_mergesort<<<blocksPerGrid, threadsPerBlock>>>(A, B, size, width, slices, D_threads, D_blocks);
        std::cout<< "saliendo de la GPU"<<std::endl;
        if (verbose)
            std::cout << "call mergesort kernel: " << tm() << " microseconds\n";

        // Switch the input / output arrays instead of copying them around
        A = A == D_data ? D_swp : D_data;
        B = B == D_data ? D_swp : D_data;
    }

    //
    // Get the list back from the GPU
    //
    tm();
    cudaMemcpy(data, A, size * sizeof(long), cudaMemcpyDeviceToHost);
    if (verbose)
        std::cout << "cudaMemcpy list back to host: " << tm() << " microseconds\n";

    // Free the GPU memory
    cudaFree(A);
    cudaFree(B);
    // fix: the geometry buffers were leaked
    cudaFree(D_threads);
    cudaFree(D_blocks);
    if (verbose)
        std::cout << "cudaFree: " << tm() << " microseconds\n";
}

// GPU helper function
// calculate the flat id of the current thread from the 3-D geometry
__device__ unsigned int getIdx(dim3* threads, dim3* blocks) {
    int x;
    return threadIdx.x +
           threadIdx.y * (x  = threads->x) +
           threadIdx.z * (x *= threads->y) +
           blockIdx.x  * (x *= threads->z) +
           blockIdx.y  * (x *= blocks->z) +
           blockIdx.z  * (x *= blocks->y);
}

//
// Perform one merge pass on our section of the data: each thread merges
// up to `slices` adjacent pairs of width/2 runs from source into dest.
//
__global__ void gpu_mergesort(long* source, long* dest, long size, long width, long slices, dim3* threads, dim3* blocks) {
    unsigned int idx = getIdx(threads, blocks);
    long start = width*idx*slices, middle, end;

    for (long slice = 0; slice < slices; slice++) {
        if (start >= size)
            break;
        middle = min(start + (width >> 1), size);
        end = min(start + width, size);
        gpu_bottomUpMerge(source, dest, start, middle, end);
        start += width;
    }
}

//
// Standard two-run merge: [start, middle) and [middle, end) of source
// are merged into dest[start, end). Called by gpu_mergesort() per slice.
//
__device__ void gpu_bottomUpMerge(long* source, long* dest, long start, long middle, long end) {
    long i = start;
    long j = middle;
    for (long k = start; k < end; k++) {
        if (i < middle && (j >= end || source[i] < source[j])) {
            dest[k] = source[i];
            i++;
        } else {
            dest[k] = source[j];
            j++;
        }
    }
}

// read data into a minimal linked list
typedef struct {
    long v;      // fix: was int — long inputs were silently truncated
    void* next;
} LinkNode;

// helper function for reading numbers from stdin
// it's 'optimized' not to check validity of the characters it reads in..
// Read whitespace-separated numbers from stdin into a freshly
// allocated array (*list, owned by the caller); returns the element
// count, or 0 when the input is empty (in which case *list is untouched).
long readList(long** list) {
    tm();
    long v, size = 0;
    LinkNode* node = 0;
    LinkNode* first = 0;
    while (std::cin >> v) {
        LinkNode* next = new LinkNode();
        next->v = v;
        if (node)
            node->next = next;
        else
            first = next;
        node = next;
        size++;
    }

    if (size) {
        *list = new long[size];
        LinkNode* cur = first;
        long i = 0;
        while (cur) {
            (*list)[i++] = cur->v;
            LinkNode* dead = cur;
            cur = (LinkNode*) cur->next;
            // fix: the temporary list nodes were leaked
            delete dead;
        }
    }

    if (verbose)
        std::cout << "read stdin: " << tm() << " microseconds\n";

    return size;
}

//
// Get the time (in microseconds) since the last call to tm();
// the first value returned by this must not be trusted
//
timeval tStart;
int tm() {
    timeval tEnd;
    gettimeofday(&tEnd, 0);
    int t = (tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec;
    tStart = tEnd;
    return t;
}
21,254
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <bits/stdc++.h>
#include <iostream>
using namespace std;

int n;

// Parallel lookup kernel: every thread probes one array slot; a thread
// that finds `num` writes its index into da[0] (slot 0 holds the -1
// "not found" sentinel, so the host reads the answer from da[0]).
// If `num` occurs more than once, which index wins is unspecified.
__global__ void BSearch(int* da, int num, int n)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: bound the tail threads of the last block — they used to
    // read past the end of the (n+1)-element array
    if (tid <= n && da[tid] == num)
        da[0] = tid;
}

int main()
{
    int num, nb;
    printf("\n Enter number of elements :"); // you can give any large number
    scanf("%d", &n);
    int a[n + 1];   // NOTE(review): stack VLA; very large n will overflow the stack
    time_t t;
    srand((unsigned)time(&t));
    a[0] = -1;      // sentinel doubling as the "not found" result slot
    for (unsigned i = 1; i <= n; i++) {
        a[i] = a[i - 1] + rand() % n;   // generates a sorted random sequence
    }
    printf("\n\n Generated array\n");
    for (int i = 1; i <= n; i++)
        printf("%d\t ", a[i]);

    printf("\n Enter number to be searched :");
    scanf("%d", &num);

    // fix: always launch enough 1024-thread blocks to cover indices
    // 0..n inclusive (the old n/1024 rounding missed a[n] whenever n
    // was an exact multiple of 1024)
    nb = n / 1024 + 1;

    int* da; // device copy of the array
    cudaMalloc(&da, (n + 1) * sizeof(int));
    cudaMemcpy(da, a, (n + 1) * sizeof(int), cudaMemcpyHostToDevice);

    BSearch<<<nb, 1024>>>(da, num, n);

    int result;
    cudaMemcpy(&result, da, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(da);   // fix: the device buffer was leaked

    if (result == -1)
        printf("\nElement not found");
    else
        printf("\nElement found at %d", result);
    return 0;
}
/* sample run (generated-array dump truncated):
 Enter number of elements :1002
 Generated array
 499 1021 1184 1782 2733 3144 3544 3810 4559 ... 512643 513149
 Enter number to be searched :9518
 Element found at 19 */
21,255
#include "array.cuh" double * malloc_2d(int num_cols, int num_rows) { int size = num_cols * num_rows; double * data; cudaMallocManaged(&data, size * sizeof(double)); return data; } double * calloc_2d(int num_cols, int num_rows) { int size = num_cols * num_rows; double * data; cudaMallocManaged(&data, size * sizeof(double)); for (int iter = 0; iter < num_rows * num_cols; iter++) { data[iter] = 0.0; } return data; } void free_2d(double * data) { cudaFree(data); } __global__ void large_matrix_multiply_kernel(double *x, double *y, double *z, int num_cols, int num_rows, bool is_sigma) { int row = blockIdx.x * blockDim.x + threadIdx.x; double sum = 0; if (row < num_rows) { for (int col = 0; col < num_cols-1; col++) { sum += x[row * num_cols + col] * y[col]; } sum += x[row * (num_cols - 1)]; z[row] = sum; if (is_sigma) z[row] = z[row] / 3; } } __global__ void matrix_multiply_kernel(double *x, double *y, double *z, int num_cols) { int row = blockIdx.x; int index = threadIdx.x; int stride = blockDim.x; double sum = 0; for (int cord = index; cord < num_cols - 1; cord += stride) { sum += x[row * num_cols + cord] * y[cord]; } z[row * 32 + index] = sum; } void matrix_m(double * M1, double * M2, double * M3, double *Buffer, int num_cols, int num_rows, double(*modifier)(double)) { /*large_matrix_multiply_kernel << < num_rows / 32 + 1, 32 >> > (M1, M2, M3, num_cols, num_rows, false); cudaDeviceSynchronize();*/ if (num_rows >= num_cols) { large_matrix_multiply_kernel << < num_rows / 32 + 1, 32 >> > (M1, M2, M3, num_cols, num_rows, false); cudaDeviceSynchronize(); } else { matrix_multiply_kernel << < num_rows, 32 >> > (M1, M2, Buffer, num_cols); cudaDeviceSynchronize(); for (int row = 0; row < num_rows; row++) { double sum = 0.0; for (int thr = 0; thr < 32; thr++) { sum += Buffer[row * 32 + thr]; } sum += M1[row * num_cols + num_cols - 1]; M3[row] = modifier(sum); } } } double ** _malloc_2d(int num_cols, int num_rows) { double ** res = (double **)malloc(num_rows * sizeof(double *)); 
for (int iter = 0; iter < num_rows; iter++) { res[iter] = (double *)calloc(num_cols, sizeof(double)); } return res; } void _zerro_2d(double ** res, int num_cols, int num_rows) { for (int iter = 0; iter < num_rows; iter++) { memset(res[iter], 0.0, num_cols * sizeof(double)); } }
21,256
#include "includes.h" __global__ void set_value(float value, float *array, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) array[index] = value; }
21,257
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

float* make_matrix(const int blurKernelWidth);

int main(int argc, char ** argv)
{
    // Build and print normalized Gaussian blur kernels of two sizes.
    float* k3 = make_matrix(3);
    printf("\n");
    float* k9 = make_matrix(9);
    // fix: both returned filters were leaked
    free(k3);
    free(k9);
    return 0;
}

/**
 * Allocate and return a blurKernelWidth x blurKernelWidth Gaussian
 * blur filter (sigma = 2), normalized so the weights sum to 1.
 * The filter is also printed row by row. Caller owns the returned
 * buffer and must free() it.
 */
float* make_matrix(const int blurKernelWidth)
{
    const float blurKernelSigma = 2.f;

    // create and fill the filter we will convolve with
    float *h_filter = (float *)malloc(sizeof(float) * blurKernelWidth * blurKernelWidth);
    if (h_filter == NULL) {   // fix: allocation failure was not checked
        fprintf(stderr, "make_matrix: out of memory\n");
        exit(EXIT_FAILURE);
    }

    float filterSum = 0.f; // for normalization
    for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r) {
        for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c) {
            float filterValue = expf(-(float)(c * c + r * r) /
                                     (2.f * blurKernelSigma * blurKernelSigma));
            h_filter[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] = filterValue;
            filterSum += filterValue;
        }
    }

    // scale so the weights sum to exactly 1
    float normalizationFactor = 1.f / filterSum;
    for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r) {
        for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c) {
            h_filter[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] *= normalizationFactor;
        }
    }

    // print the finished kernel
    for (int r = 0; r < blurKernelWidth; ++r) {
        for (int c = 0; c < blurKernelWidth; ++c) {
            printf("%f ", h_filter[r * blurKernelWidth + c]);
        }
        printf("\n");
    }

    return h_filter;
}
21,258
/*
 * How to compile (assume cuda is installed at /usr/local/cuda/)
 * nvcc add.cu
 * ./a.out
 */
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <cuda_runtime.h>

#define N 2048

// One-block-per-element vector add (currently unused: the launch in
// main() is commented out and the CPU path runs instead).
__global__ void add_kernel(int* a, int* b, int*c){
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

// Fill `a` with `num` pseudo-random ints in [0, 999].
// rand() is never seeded, so the sequence is the same on every run.
void random_ints(int* a, int num)
{
    for(int i=0; i<num; i++){
        a[i] = rand() % 1000;
    }
}

// Serial reference: c[i] = a[i] + b[i] for all N elements.
void add_in_cpu(int* a, int* b, int*c){
    for(int i=0; i<N; i++){
        c[i] = a[i] + b[i];
    }
}

int main(void) {
    printf("Vector addition in CPU\n");
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);

    // Allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);

    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    clock_t cpu_startTime, cpu_endTime;
    double cpu_ElapseTime=0;
    cpu_startTime = clock();

    float gpu_time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Launch add() kernel on GPU
    // NOTE(review): the kernel launch is commented out, so BOTH timers
    // below actually measure the CPU addition — the "(in gpu)" label is
    // misleading in this configuration.
    // add_kernel<<<N, 1>>>(d_a, d_b, d_c);
    add_in_cpu(a, b, c);
    cudaDeviceSynchronize();

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_time, start, stop);

    cpu_endTime = clock();
    cpu_ElapseTime = (double)((cpu_endTime - cpu_startTime)/(CLOCKS_PER_SEC/1000.0));

    printf("Time to generate: %3.3f ms (in cpu)\n", cpu_ElapseTime);
    printf("Time to generate: %3.3f ms (in gpu)\n", gpu_time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // Copy result back to host
    // (disabled along with the kernel launch above; `c` already holds
    // the CPU result)
    // cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    printf("Result: \n");
    for(int i=0; i<15; i++){
        printf("%d ", c[i]);
    }
    printf(".... %d \n", c[N-1]);

    // Cleanup
    if(a) free(a);
    if(b) free(b);
    if(c) free(c);
    if(d_a) cudaFree(d_a);
    if(d_b) cudaFree(d_b);
    if(d_c) cudaFree(d_c);
    return 0;
}
21,259
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cstdio> #include <iostream> const int N = 1024; const int BLOCKSIZE = 16; dim3 dimBlock(BLOCKSIZE, BLOCKSIZE); // N / bs + ((N % bs) != 0); dim3 dimGrid((N / dimBlock.x) + 1, (N / dimBlock.y) + 1); __global__ void addMatrix(const float* a, const float* b, float* c, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * n; if (i < n && j < n) { c[index] = a[index] + b[index]; } } int main() { printf("Kernel will be invoked with: Block(%d,%d), Grid(%d,%d)\n", dimBlock.x, dimBlock.y, dimGrid.x, dimGrid.y); auto* a = new float[N * N]; auto* b = new float[N * N]; auto* c = new float[N * N]; for (int i = 0; i < N * N; ++i) { a[i] = 1.0f; b[i] = 3.5f; } float *ad, *bd, *cd; const int size = N * N * sizeof(float); cudaMalloc((void**)&ad, size); cudaMalloc((void**)&bd, size); cudaMalloc((void**)&cd, size); cudaMemcpy(ad, a, size, cudaMemcpyHostToDevice); cudaMemcpy(bd, b, size, cudaMemcpyHostToDevice); addMatrix <<<dimGrid, dimBlock >>> (ad, bd, cd, N); cudaMemcpy(c, cd, size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for (int i = 0; i < N * N; i++) { std::cout << i << " " << c[i] << std::endl; } cudaFree(ad); cudaFree(bd); cudaFree(cd); delete[] a; delete[] b; delete[] c; return 0; }
21,260
#include "includes.h"
/*
 * This code is released into the public domain.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

///////////////////////////////////////////////////////////////////////////////////////////
// Definitions and helper utilities

// Block width for CUDA kernels
#define BW 128

#ifdef USE_GFLAGS
    #ifndef _WIN32
        #define gflags google
    #endif
#else
    // Constant versions of gflags: when gflags is unavailable, each DEFINE_*
    // macro expands to a plain const FLAGS_<name> with the default value.
    #define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value)
    #define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value)
    #define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value)
    #define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value)
    #define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value))
#endif

/**
 * Sets every element of vec[0..size-1] to 1.0f, one thread per element.
 * Launch with at least `size` total threads; extra threads return early.
 * (The previous comment here, "Computes ceil(x / y)", described a different
 * helper and did not match this kernel.)
 */
__global__ void FillOnes(float *vec, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    vec[idx] = 1.0f;
}
21,261
#include "includes.h"

// Naive weight-gradient accumulation for a "vertical" convolution.
// Grid-stride loop over the n gradient entries: entry i accumulates the inner
// product of a dy plane and an x window, both of length oH*oW.
// dw must be zero- (or properly-) initialised by the caller; this kernel
// only adds into it.
// NOTE(review): x_offset is shifted by (i%kL)*oW while the sum still runs over
// the full oH*oW span, so successive kernel rows read overlapping x ranges --
// whether x is padded to make that valid cannot be verified from this file.
__global__ void conv_vertical_naive_gradParam(const int n, float *dw, const float *x, const float *dy, const int kL, const int oH, const int oW)
{
    // Grid-stride loop: each thread handles i, i+stride, i+2*stride, ...
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) {
        int dy_offset = (i/kL)*oH*oW;              // start of this entry's dy plane
        int x_offset = (i/kL)*oH*oW + (i%kL)*oW;   // same plane in x, shifted by the kernel-row index
        for (int k = 0; k < oH*oW; k++) {
            dw[i] += dy[dy_offset + k]*x[x_offset + k];
        }
    }
}
21,262
#include "includes.h"

// Block-wide sum reduction in shared memory.
// Each block loads up to blockDim.x elements of d_data_in (zero-padded past
// data_size) and writes its partial sum to d_data_out[blockIdx.x]; the caller
// combines the per-block partials.
// Requires blockDim.x == BLKSIZE; the doubling stride below assumes
// blockDim.x is a power of two -- TODO confirm at the launch site.
__global__ void kernel_2(float *d_data_in, float *d_data_out, int data_size)
{
    __shared__ float s_data[BLKSIZE];
    int tid = threadIdx.x;
    int index = tid + blockIdx.x*blockDim.x;

    // Load this thread's element, or 0 when past the end of the input.
    s_data[tid] = 0.0;
    if (index < data_size){
        s_data[tid] = d_data_in[index];
    }
    __syncthreads();

    // Interleaved reduction: at step s, thread tid handles slot tid*s and
    // folds in the partner slot s/2 away.  The barrier is outside the `if`,
    // so all threads reach it.
    // NOTE(review): the strided slot indexing causes shared-memory bank
    // conflicts; left as-is (documentation-only pass).
    for (int s = 2; s <= blockDim.x; s = s * 2){
        index = tid * s;
        if (index < blockDim.x){
            s_data[index] += s_data[index + s / 2];
        }
        __syncthreads();
    }

    // Slot 0 now holds the block's sum; thread 0 publishes it.
    if (tid == 0){
        d_data_out[blockIdx.x] = s_data[tid];
    }
}
21,263
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include "cuda_runtime.h"

void checkCUDAerror(const char *msg);

// kernel to make the calculation.
// One thread per (walker, galaxy) pair: thread idx evaluates a 6-component
// Gaussian mixture over its galaxy's 30x30-pixel block and writes the chi2
// into LH[idx].
// NOTE(review): there is no `idx < nWalkers*ngals` guard, so threads in the
// final partially-used block read/write past the end of the arrays --
// confirm the launch sizing divides evenly or add a guard.
// NOTE(review): the exponent reads (xx - b*b); a Gaussian would normally be
// (xx - b)*(xx - b) -- verify against the intended model.
__global__ void calc(float* a1, float* b1, float* c1,float* a2, float* b2, float* c2, float* a3, float* b3, float* c3, float* a4, float* b4, float* c4, float* a5, float* b5, float* c5, float* a6, float* b6, float* c6, float* x, float* data, int ndata, float* LH)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Need to calc the pearson chi2 : sum over data points of (model-data)^2/data
    // this will mean that x and data have the same range.
    // NOTE(review): the code below divides by the model (sumG), not by the
    // data -- the comment and the code disagree; confirm which is intended.
    float chi2 = 0;
    // how to calc the indices over which we will iterate?
    // only doing one block of 30x30 pixels.
    int whichGal = int(floorf(idx/80));  // 80 walkers per galaxy
    int start = 30*30*whichGal;          // first pixel of this galaxy's block
    int stop = start+(30*30);
    for(int i=start;i<stop;i++){
        // calc gaussian at this point
        float sumG=0;
        float xx = x[i];
        sumG += a1[idx]*__expf( -1*(xx-b1[idx]*b1[idx])/(2*c1[idx]*c1[idx]) );
        sumG += a2[idx]*__expf( -1*(xx-b2[idx]*b2[idx])/(2*c2[idx]*c2[idx]) );
        sumG += a3[idx]*__expf( -1*(xx-b3[idx]*b3[idx])/(2*c3[idx]*c3[idx]) );
        sumG += a4[idx]*__expf( -1*(xx-b4[idx]*b4[idx])/(2*c4[idx]*c4[idx]) );
        sumG += a5[idx]*__expf( -1*(xx-b5[idx]*b5[idx])/(2*c5[idx]*c5[idx]) );
        sumG += a6[idx]*__expf( -1*(xx-b6[idx]*b6[idx])/(2*c6[idx]*c6[idx]) );
        chi2+=( (data[i]-sumG)*(data[i]-sumG))/sumG;
    }
    LH[idx] = chi2;
}

//////////////
int main (int argc, char **argv)
{
    // do I have any input args?
    // NOTE(review): `name` is captured but never used afterwards.
    char* name;
    if(argc>1) {
        name = argv[1];
    }

    // how many walkers? They will be evaluated in parallel.
    int nWalkers = 80;
    // how many gals shall I try to do at the same time?
    int ngals = 10000;

    ////////////////////////////
    // now for the data. Assume it'll be a 30x30 pixel square.
    // I put this outside the loop cos the data never changes.
    int ndata = 30*30*ngals;
    printf("size of data: %d \n", ndata);
    size_t sizeneeded_data = ndata*sizeof(float);
    float *h_x = 0, *h_data = 0;
    h_x = (float*) malloc(sizeneeded_data);
    h_data = (float*) malloc(sizeneeded_data);
    for(int i=0;i<ndata;i++){
        h_x[i]=1;
        h_data[i]=1;
    }

    // data GPU memory
    float *d_x, *d_data;
    cudaMalloc(&d_x, sizeneeded_data);
    cudaMalloc(&d_data, sizeneeded_data);
    cudaMemcpy(d_data, h_data, sizeneeded_data, cudaMemcpyHostToDevice);
    cudaMemcpy(d_x, h_x, sizeneeded_data, cudaMemcpyHostToDevice);
    checkCUDAerror("data memcpy");

    //////////////////test////////////////////
    // try looping over 1000 times. I want to see how long this takes, in this dumb implementation.
    ////////////////////////////////////////
    for(int hh=0;hh<1000;hh++){

        // set up the walkers CPU memory.
        size_t sizeneeded = nWalkers*ngals*sizeof(float);
        float *h_a1 = 0, *h_b1=0, *h_c1=0;
        float *h_a2 = 0, *h_b2=0, *h_c2=0;
        float *h_a3 = 0, *h_b3=0, *h_c3=0;
        float *h_a4 = 0, *h_b4=0, *h_c4=0;
        float *h_a5 = 0, *h_b5=0, *h_c5=0;
        float *h_a6 = 0, *h_b6=0, *h_c6=0;
        h_a1 = (float*) malloc(sizeneeded);
        h_b1 = (float*) malloc(sizeneeded);
        h_c1 = (float*) malloc(sizeneeded);
        h_a2 = (float*) malloc(sizeneeded);
        h_b2 = (float*) malloc(sizeneeded);
        h_c2 = (float*) malloc(sizeneeded);
        h_a3 = (float*) malloc(sizeneeded);
        h_b3 = (float*) malloc(sizeneeded);
        h_c3 = (float*) malloc(sizeneeded);
        h_a4 = (float*) malloc(sizeneeded);
        h_b4 = (float*) malloc(sizeneeded);
        h_c4 = (float*) malloc(sizeneeded);
        h_a5 = (float*) malloc(sizeneeded);
        h_b5 = (float*) malloc(sizeneeded);
        h_c5 = (float*) malloc(sizeneeded);
        h_a6 = (float*) malloc(sizeneeded);
        h_b6 = (float*) malloc(sizeneeded);
        h_c6 = (float*) malloc(sizeneeded);

        // assign them random numbers.
        // NOTE(review): only the first nWalkers (80) entries of each
        // nWalkers*ngals-sized array are initialised here; the kernel reads
        // idx up to nWalkers*ngals, so the remainder is indeterminate --
        // confirm whether this is intentional for the timing test.
        for(int i=0;i<nWalkers;i++){
            h_a1[i]=0.1; h_b1[i]=0.5; h_c1[i]=10.0;
            h_a2[i]=0.1; h_b2[i]=0.5; h_c2[i]=10.0;
            h_a3[i]=0.1; h_b3[i]=0.5; h_c3[i]=10.0;
            h_a4[i]=0.1; h_b4[i]=0.5; h_c4[i]=10.0;
            h_a5[i]=0.1; h_b5[i]=0.5; h_c5[i]=10.0;
            h_a6[i]=0.1; h_b6[i]=0.5; h_c6[i]=10.0;
        }

        // assign the GPU memory
        float *d_a1, *d_b1, *d_c1;
        float *d_a2, *d_b2, *d_c2;
        float *d_a3, *d_b3, *d_c3;
        float *d_a4, *d_b4, *d_c4;
        float *d_a5, *d_b5, *d_c5;
        float *d_a6, *d_b6, *d_c6;
        cudaMalloc(&d_a1, sizeneeded);
        cudaMalloc(&d_b1, sizeneeded);
        cudaMalloc(&d_c1, sizeneeded);
        cudaMalloc(&d_a2, sizeneeded);
        cudaMalloc(&d_b2, sizeneeded);
        cudaMalloc(&d_c2, sizeneeded);
        cudaMalloc(&d_a3, sizeneeded);
        cudaMalloc(&d_b3, sizeneeded);
        cudaMalloc(&d_c3, sizeneeded);
        cudaMalloc(&d_a4, sizeneeded);
        cudaMalloc(&d_b4, sizeneeded);
        cudaMalloc(&d_c4, sizeneeded);
        cudaMalloc(&d_a5, sizeneeded);
        cudaMalloc(&d_b5, sizeneeded);
        cudaMalloc(&d_c5, sizeneeded);
        cudaMalloc(&d_a6, sizeneeded);
        cudaMalloc(&d_b6, sizeneeded);
        cudaMalloc(&d_c6, sizeneeded);

        ///////////////////////////////
        // assign the output memory. One number returned for each walker.
        size_t sizeneeded_out = nWalkers*ngals*sizeof(float);
        float *h_LH = 0;
        float *d_LH;
        h_LH = (float*) malloc(sizeneeded_out);
        cudaMalloc(&d_LH, sizeneeded_out);

        /////////////////////////
        // copy data over to GPU
        cudaMemcpy(d_a1, h_a1, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b1, h_b1, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_c1, h_c1, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_a2, h_a2, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b2, h_b2, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_c2, h_c2, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_a3, h_a3, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b3, h_b3, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_c3, h_c3, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_a4, h_a4, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b4, h_b4, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_c4, h_c4, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_a5, h_a5, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b5, h_b5, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_c5, h_c5, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_a6, h_a6, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b6, h_b6, sizeneeded, cudaMemcpyHostToDevice);
        cudaMemcpy(d_c6, h_c6, sizeneeded, cudaMemcpyHostToDevice);
        // NOTE(review): h_LH is uninitialised here -- harmless only because
        // the kernel fully overwrites d_LH before it is read back.
        cudaMemcpy(d_LH, h_LH, sizeneeded_out, cudaMemcpyHostToDevice);
        checkCUDAerror("memcpy");

        // set up kernel params.
        // First: 80 walkers, each will eval one gaussian.
        int threadsPerBlock = 512; // max possible. Don't care much about mem access yet.
        int blocksPerGrid = int(ceil(nWalkers*ngals / float(threadsPerBlock)));
        if(hh==500)printf(" theads per block: %d and blocks per grid: %d for a total of: %d\n", threadsPerBlock, blocksPerGrid, threadsPerBlock*blocksPerGrid);

        // run it!
        calc<<<blocksPerGrid, threadsPerBlock >>> (d_a1, d_b1, d_c1, d_a2, d_b2, d_c2, d_a3, d_b3, d_c3, d_a4, d_b4, d_c4, d_a5, d_b5, d_c5, d_a6, d_b6, d_c6, d_x, d_data, ndata, d_LH);
        checkCUDAerror("kernel");

        // copy the data back off the GPU
        cudaMemcpy(h_LH, d_LH, sizeneeded_out, cudaMemcpyDeviceToHost);

        // print it out...
        //if(hh%9999==0){
        //for(int i=0;i<nWalkers*ngals;i++){
        //  printf("LH is: %f ", h_LH[i]);
        //}
        //}
        //printf("\n");

        //////////////
        // now free upp all the histo memory, both on CPU and GPU. Otherwise I'll fill up the device memory pretty fast!
        // NOTE(review): h_LH and d_LH are NOT freed here, so each of the
        // 1000 iterations leaks one host and one device buffer -- confirm
        // and add free(h_LH)/cudaFree(d_LH).
        free(h_a1); free(h_b1); free(h_c1);
        free(h_a2); free(h_b2); free(h_c2);
        free(h_a3); free(h_b3); free(h_c3);
        free(h_a4); free(h_b4); free(h_c4);
        free(h_a5); free(h_b5); free(h_c5);
        free(h_a6); free(h_b6); free(h_c6);
        cudaFree(d_a1); cudaFree(d_b1); cudaFree(d_c1);
        cudaFree(d_a2); cudaFree(d_b2); cudaFree(d_c2);
        cudaFree(d_a3); cudaFree(d_b3); cudaFree(d_c3);
        cudaFree(d_a4); cudaFree(d_b4); cudaFree(d_c4);
        cudaFree(d_a5); cudaFree(d_b5); cudaFree(d_c5);
        cudaFree(d_a6); cudaFree(d_b6); cudaFree(d_c6);

    }// end loop over 1000 walker updates.

    free(h_x);
    free(h_data);
    cudaFree(d_x);
    cudaFree(d_data);

    printf("\n");
}

//////////////////////////////////////////////////////////////
// simple function to check for errors.
// Prints the message and the CUDA error string, then exits, if the most
// recent CUDA call (or async kernel launch) left an error pending.
//////////////////////////////////////////////////////////////
void checkCUDAerror(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
21,264
// Number of threads per block.
#define NT 1024

// Structure for a 3-D point.
typedef struct {
    double x;
    double y;
    double z;
}point_t;

// Structure for a solution: medoid indices (a, b) and total distance d.
// d == -1.0 marks an invalid/unset solution (see compareSol).
typedef struct {
    int a;
    int b;
    double d;
}solution_t;

// Variables in global memory.
// Index (into `solutions`) of the best solution found so far across blocks.
// NOTE(review): must be initialised from the host before launch; nothing in
// this file sets its starting value.
__device__ int devBestSol;

// Per-thread variables in shared memory.
__shared__ solution_t shrSols[NT];
__shared__ int shrBestSolIndex[NT];

/**
 * Calculates the city-block distance between two point_t structs as defiend
 * by this function. distance(P1,P2) = |x1 - x2| + |y1 - y2| + |z1 - z2|
 * @param p1 A pointer to the first point.
 * @param p2 A pointer to the second point.
 * @return The city block distance between p1 and p2.
 */
__device__ double distance(point_t *p1, point_t *p2)
{
    double tempX = p1->x - p2->x;
    if (tempX < 0) {
        tempX *= -1;
    }
    double tempY = p1->y - p2->y;
    if (tempY < 0) {
        tempY *= -1;
    }
    double tempZ = p1->z - p2->z;
    if (tempZ < 0) {
        tempZ *= -1;
    }
    return tempX + tempY + tempZ;
}

/**
 * Compares to different solution_t to find the one with the lowest distance then a index and finall b index.
 * A d of -1.0 is treated as "invalid" and always loses; ties on d are broken
 * by preferring the smaller a, then the smaller b.
 * @param a Pointer to first solution.
 * @param b Pointer to second solution.
 * @return true if a is the better solution false otherwise.
 */
__device__ bool compareSol(solution_t *a, solution_t *b){
    bool aIsbest = false;
    if(a->d == -1.0){aIsbest = false;}        // invalid a never wins
    else if(b->d == -1.0){aIsbest = true;}    // valid a beats invalid b
    else if (b->d > a->d) {
        aIsbest = true;
    } else if (b->d == a->d) {
        if (b->a > a->a){
            aIsbest = true;
        }else{
            if (b->b > a->b){
                aIsbest = true;
            }
        }
    }
    return aIsbest;
}

/**
 * Device kernel to calculate the distance for each point to its closest medoid.
 *
 * Called with a one-dimensional grid of one-dimensional blocks, N blocks, NT
 * threads per block. N = number of points. Each block finds the best solution
 * for its given A index. Each thread within a block computes its total
 * distance for its B index(s).
 *
 * @param pointList Array of all the points.
 * @param solutions Array of all the solutions the gpu finds.
 * @param N Total number of points.
 */
extern "C" __global__ void computeMedoids (point_t *pointList, solution_t *solutions, int N)
{
    int a = blockIdx.x, b;     // X index of this block, the A medoids index
    int thrd = threadIdx.x;    // Index of this thread within block
    point_t medA = pointList[a];  // Medoid A's point
    // double *solu = &solutions[a + b * N];
    double d = 0.0;

    // This thread's best-so-far; starts invalid (d = -1.0).
    solution_t sol;
    sol.a = a;
    sol.b = thrd;
    sol.d = -1.0;

    // Each thread strides over B candidates thrd, thrd+NT, thrd+2*NT, ...
    for (b = thrd; b < N; b += NT) {
        d = 0;
        // NOTE(review): tempSol is only assigned inside the p-loop; if the
        // loop body never reaches the assignments (tiny N), tempSol is read
        // uninitialised by compareSol below -- confirm N is always large
        // enough at the call site.
        solution_t tempSol;
        for (int p = 0; p < N; p++){
            point_t medB = pointList[b];  // Medoid B's point
            if (p == a || p == b)
                continue;
            double distA = distance( &medA, &pointList[p]);
            double distB = distance( &medB, &pointList[p]);
            d += (distA <= distB) ? distA : distB;
            tempSol.a = a;
            tempSol.b = b;
            // Invalidate duplicate pairs (b <= a); `|` here is bitwise-or on
            // two bools, which behaves like logical-or for 0/1 operands.
            tempSol.d = (b <= a | b >= N) ? -1 : d;
        }
        if(!compareSol(&sol, &tempSol))
            sol = tempSol;
    }

    // Publish each thread's candidate, then reduce within the block.
    shrSols[thrd] = sol;
    shrBestSolIndex[thrd] = thrd;
    __syncthreads();

    // Reduction to find the best solution in the current block
    for (int s = NT / 2; s > 0; s >>= 1) {
        if (thrd < s){
            shrBestSolIndex[thrd] =
                compareSol(&shrSols[shrBestSolIndex[thrd]], &shrSols[shrBestSolIndex[thrd + s]])
                    ? shrBestSolIndex[thrd]
                    : shrBestSolIndex[thrd + s];
        }
        __syncthreads();
    }

    // Every thread writes the same winner; fence so other blocks see it
    // before the global CAS race below.
    solutions[a] = shrSols[shrBestSolIndex[0]];
    __threadfence();

    // Final reduction to find the best solution overall: lock-free update of
    // devBestSol via compare-and-swap, retried until our write wins or the
    // current best is confirmed better.
    if (thrd == 0) {
        int oldSol;
        int newSol;
        do {
            oldSol = devBestSol;
            newSol = compareSol(&solutions[devBestSol], &solutions[a]) ? devBestSol : a;
        } while (atomicCAS (&devBestSol, oldSol, newSol) != oldSol);
    }
}
21,265
/* makematrix.cu
   Constructs matrix equation A x = b for local minimization problem
   Exports matrix A and vector b into plain text
   Written by Hee Sok Chung at ANL July 10, 2016
   Modified by Ran Hong for cuda compatibility */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

/* Legendre functions are defined without normalization factors in order to
 * avoid overflow from gamma function */
/* hypergeometric function 1F2 */
double h1f2(double a, double b, double c, double z);
/* LegendreQ(m-1/2,n,cosh(x)) */
double LegendreQ(double m, double n, double z);
/* derivative of LegendreQ(m-1/2,n,cosh(x)) */
double DLegendreQ(int m, int n, double z);
/* coordinate transformation r, z -> zeta */
double zetaf(double rho, double z, double r0);
/* coordinate transformation r, z -> eta */
double etaf(double rho, double z, double r0);
/* main subroutine that calculates matrix and vector elements */
void cpx(double **datain, double *cc, double *cs, double *sc, double *ss, double **matrix, double *zeta, double *eta, int **ccidx, int ndata, int dim1, double zt0, double rr, int *sliceflag);

/**
 * CUDA kernel: accumulates the 4x4 outer-product tile
 *   (tcc,tcs,tsc,tss)_i x (tcc,tcs,tsc,tss)_j
 * into the (i,j) tile of the (4*dim_v+2)^2 matrix d_M.
 * One thread per (i,j) pair; launched repeatedly, so d_M must start at zero
 * and keeps its running sum between launches.
 */
__global__ void vectorProd(const double *d_tcc, const double *d_tcs, const double *d_tsc, const double *d_tss, double *d_M, int dim_v)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int ii=0;
    int jj=0;
    int dim_M = 4*dim_v+2;
    double local_M[4][4];
    if (i < dim_v && j< dim_v){
        // Pull the current 4x4 tile into registers, accumulate, write back.
        for (ii=0;ii<4;ii++){
            for (jj=0;jj<4;jj++){
                local_M[ii][jj]=d_M[(4*i+ii+1)*dim_M+4*j+jj+1];
            }
        }
        local_M[0][0]+=d_tcc[i]*d_tcc[j];
        local_M[1][0]+=d_tcs[i]*d_tcc[j];
        local_M[2][0]+=d_tsc[i]*d_tcc[j];
        local_M[3][0]+=d_tss[i]*d_tcc[j];
        local_M[0][1]+=d_tcc[i]*d_tcs[j];
        local_M[1][1]+=d_tcs[i]*d_tcs[j];
        local_M[2][1]+=d_tsc[i]*d_tcs[j];
        local_M[3][1]+=d_tss[i]*d_tcs[j];
        local_M[0][2]+=d_tcc[i]*d_tsc[j];
        local_M[1][2]+=d_tcs[i]*d_tsc[j];
        local_M[2][2]+=d_tsc[i]*d_tsc[j];
        local_M[3][2]+=d_tss[i]*d_tsc[j];
        local_M[0][3]+=d_tcc[i]*d_tss[j];
        local_M[1][3]+=d_tcs[i]*d_tss[j];
        local_M[2][3]+=d_tsc[i]*d_tss[j];
        local_M[3][3]+=d_tss[i]*d_tss[j];
        for (ii=0;ii<4;ii++){
            for (jj=0;jj<4;jj++){
                d_M[(4*i+ii+1)*dim_M+4*j+jj+1]=local_M[ii][jj];
            }
        }
    }
}

int main (int argc, char **argv)
{
    int nmax, mmax, ndata, dim1, dim2x, vecdim;
    // Max. number of harmonics in azimuthal direction
    nmax=200;
    // Max. number of harmonics in poloidal direction
    mmax=8;
    // Matrix and vector dimensions
    dim1=(nmax+1)*(mmax+1);
    dim2x=4*dim1;
    double rr;
    double *cc, *cs, *sc, *ss;
    double **matrix, **datain;
    double datatmp;
    double zeta[30], eta[30];
    double zt0, bzero;
    double angles[30], radii[30];
    int **ccidx;
    int **mnidx;
    int *sinz, *vecidx;
    int i, j, id, md, nd, prb ;
    int vecc;
    FILE *input;
    FILE *inputc;

    // declare vector and matrix elements as pointers
    cc=(double *)malloc((dim1+2)*sizeof(double));
    cs=(double *)malloc((dim1+2)*sizeof(double));
    sc=(double *)malloc((dim1+2)*sizeof(double));
    ss=(double *)malloc((dim1+2)*sizeof(double));
    matrix=(double **)malloc((dim2x+2)*sizeof(double*));
    for (i=0;i<(dim2x+2);i++) {
        matrix[i]=(double *)malloc((dim2x+2)*sizeof(double));
    }
    sinz=(int *)malloc((dim2x+2)*sizeof(int));   // for identifying exact zeros
    vecidx=(int *)malloc((dim2x+2)*sizeof(int)); // vector index
    ccidx=(int **)malloc((dim1+2)*sizeof(int*)); // matrix single index -> double index
    for (i=0;i<(dim1+2);i++) {
        ccidx[i]=(int *)malloc(3*sizeof(int));
    }
    mnidx=(int **)malloc((dim1+2)*sizeof(int*)); // matrix double index -> single index
    for (i=0;i<(dim1+2);i++) {
        mnidx[i]=(int *)malloc((dim1+2)*sizeof(int));
    }

    // count number of azimuthal slices
    ndata=0;
    // inputc=fopen("data40.txt", "r"); // open data for counting
    inputc=fopen("data52.txt", "r");    // open data for counting
    while(fscanf(inputc, "%lf", &datatmp)>0) {
        ndata++;
    }
    ndata=ndata/26; // azimuthal angle + 25 probes
    fclose(inputc); // close data file used for counting
    int sliceflag[ndata+1];
    printf("number of data = %d\n", ndata);

    // this pointer will be used for data taking
    datain=(double **)malloc((ndata+1)*sizeof(double*));
    for (id=0;id<=ndata;id++) {
        datain[id]=(double *)malloc((25+2)*sizeof(double));
    }

    // mark all azimuthal slices as pending
    i=1;
    for (id=1;id<=ndata;id++){
        sliceflag[id]=1;
    }

    bzero=61.789;   // reference B field, chosen close to average B field
    zt0=6.5;        // toroidal harmonics will be normalized at zeta=zt0

    // construct probe positions (1 centre probe, ring of 8 at r=22.5, ring of 16 at r=45)
    angles[1]=0;
    radii[1]=0;
    for (i=2;i<=9;i++) {
        angles[i]=(i-2.)*M_PI/4;
        radii[i]=22.5;
    }
    for (i=10;i<=25;i++) {
        angles[i]=(i-10.)*M_PI/8;
        radii[i]=45.;
    }

    // calculate toroidal coordinates
    rr=7111.5; // toroid center. MUST NEVER COINCIDE WITH ACTUAL PROBE POSITION
    printf ("\nComputing coordinates\n");
    for (i=1;i<=25;i++) {
        zeta[i]=zetaf(radii[i]*sin(angles[i])+7112., -radii[i]*cos(angles[i]),rr);
        eta[i]=etaf(radii[i]*sin(angles[i])+7112., -radii[i]*cos(angles[i]),rr);
    }

    /* initialize arrays */
    i=0;
    nd=0;
    md=0;
    printf ("\nInitializing coefficients\n");
    for (i=0;i<=dim2x;i++) {
        sinz[i]=1;
    }
    i=0;
    for (nd=0;nd<=nmax;nd++) {
        for (md=0;md<=mmax;md++) {
            i+=1;
            ccidx[i][1]=md;
            ccidx[i][2]=nd;
            mnidx[md][nd]=4*(i-1);
            cc[i]=0.;
            cs[i]=0.;
            sc[i]=0.;
            ss[i]=0.;
            if(nd==0) { // identify sin(0) = 0 due to n = 0
                sinz[4*(i-1)+2]=0;
                sinz[4*(i-1)+4]=0;
            }
            if(md==0) { // identify sin(0) = 0 due to m = 0
                sinz[4*(i-1)+3]=0;
                sinz[4*(i-1)+4]=0;
            }
        }
    }
    printf ("\nInitializing matrix\n");
    for (i=0;i<=dim2x;i++) {
        for (j=0;j<=dim2x;j++) {
            matrix[i][j]=0.;
        }
    }

    // input =fopen("data40.txt", "r"); // this one's for actually reading data
    input =fopen("data52.txt", "r");    // this one's for actually reading data
    for (id=1;id<=ndata;id++) {
        fscanf (input, "%lf", &datatmp); // read azimuthal angle in degrees
        datain[id][0]=datatmp;
        for (prb=1;prb<=25;prb++){ // loop over 25 probes
            fscanf (input, "%lf", &datatmp);         // read B-fields in kHz
            datatmp=datatmp*0.001+61.7400000-bzero;  // convert to MHz and offset
            datain[id][prb]=datatmp;
        }
    }
    fclose(input); // close data

    ///////////////////////////////////////////////////////////////////////////
    printf ("\nComputing matrix elements\n");
    // Calculate matrix and vector elements
    cpx(datain, cc, cs, sc, ss, matrix, zeta, eta, ccidx, ndata, dim1, zt0, rr, sliceflag);
    ///////////////////////////////////////////////////////////////////////////

    for(i=1;i<=ndata;i++) {
        if (sliceflag[i]!=0) printf("slice %d status = %d\n", i, sliceflag[i]);
    }

    // write out results
    FILE *out0, *out1, *outn, *outd;
    out0 = fopen ("outmatrix.dat", "w"); // matrix elements
    out1 = fopen ("outvector.dat", "w"); // vector elements
    outn = fopen ("outnames.txt", "w");  // index dictionary
    outd = fopen ("vecdim.txt", "w");    // matrix dimensions
    fprintf(outn, "%d\n", mmax);
    fprintf(outn, "%d\n", nmax);
    for (i=1;i<=dim1;i++) {
        fprintf(outn, "1\t%d\t%d\n",ccidx[i][1], ccidx[i][2]);
        if(sinz[4*(i-1)+2]!=0) {
            fprintf(outn, "2\t%d\t%d\n",ccidx[i][1], ccidx[i][2]);
        }
        if(sinz[4*(i-1)+3]!=0) {
            fprintf(outn, "3\t%d\t%d\n",ccidx[i][1], ccidx[i][2]);
        }
        if(sinz[4*(i-1)+4]!=0) {
            fprintf(outn, "4\t%d\t%d\n",ccidx[i][1], ccidx[i][2]);
        }
    }
    // write the reduced vector (exact zeros removed), counting its dimension
    vecdim=0;
    for (i=1;i<=dim1;i++) {
        vecdim++;
        fwrite(&(cc[i]),sizeof(double),1,out1);
        if(sinz[4*(i-1)+2]!=0) {
            vecdim++;
            fwrite(&(cs[i]),sizeof(double),1,out1);
        }
        if(sinz[4*(i-1)+3]!=0) {
            vecdim++;
            fwrite(&(sc[i]),sizeof(double),1,out1);
        }
        if(sinz[4*(i-1)+4]!=0) {
            vecdim++;
            fwrite(&(ss[i]),sizeof(double),1,out1);
        }
    }
    // map reduced indices back to full-matrix rows/columns
    id=0;
    for (i=1;i<=dim2x;i++) {
        if(sinz[i]!=0) {
            id++;
            vecidx[id]=i;
        }
    }
    vecc=0;
    for (i=1;i<=vecdim;i++) {
        for (j=1;j<=vecdim;j++) {
            vecc++;
            fwrite(&(matrix[vecidx[i]][vecidx[j]]),sizeof(double),1,out0);
        }
    }
    fprintf(outd, "%d\n", vecdim);
    fclose(out0);
    fclose(out1);
    fclose(outn);
    fclose(outd);
    free(cc);
    free(cs);
    free(sc);
    free(ss);
    free(ccidx);
    return 0;
}

/* hypergeometric function 1F2 evaluated by truncating an infinite sum */
double h1f2(double a, double b, double c, double z){
    int i, imax;
    double err, tol, si, sii, f1f2;
    double errabs, errrel;
    imax=100000000;    // 10^8 maximum iterations
    sii=1.;            // initial term
    err=1.;            // estimated uncertainty
    f1f2=1.;           // initial contribution
    i=0;               // iterator
    tol=pow(10.,-16.); // error tolerance
    if (z<tol||z>1.-tol) { // z out of range or dangerously close to 0 or 1
        i=imax+1;
        f1f2=1./i;     // to return inf or nan
        i=imax+1;      // stop evaluation
    }
    while (err>tol&&i<=imax) {
        i++;
        si=(a+i-1.)*(b+i-1.)/(c+i-1.)*z/i*sii; // next term
        f1f2+=si;                              // next term added
        errabs=fabsl(si*z/(1.-z));  // estimated absolute uncertainty
        errrel=fabsl(errabs/f1f2);  // estimated relative uncertainty
        if (errabs>errrel) {        // choose larger one as error
            err=errabs;
        } else {
            err=errrel;
        }
        sii=si;
    }
    return f1f2;
}

/* Legendre function of the second kind, Q_{m+1/2}^n (cosh(z)).
   Normalized to remove gamma function. Regular inside the torus */
double LegendreQ(double m, double n, double z){
    double lq;
    lq=pow(tanh(z),n)/pow(cosh(z),m+.5);
    lq=lq*h1f2(.5*(m+n+.5),.5*(m+n+1.5),m+1.,1./cosh(z)/cosh(z));
    return lq;
}

/* Derivative of Q_{m+1/2}^n (cosh(z)). Normalized to remove gamma function. */
double DLegendreQ(int m, int n, double z){
    double dlq, lq1, lq2;
    if (m==0) {
        dlq=-1/(8*pow(cosh(z),1.5))/sinh(z);
        dlq=dlq*pow(tanh(z),n);
        dlq=dlq*( (4.*pow(sinh(z),2.)-8.*n)* \
            h1f2(n/2.+.25,n/2.+.75,1.,1/cosh(z)/cosh(z)) + \
            (4.*n*n+8.*n+3.)*tanh(z)*tanh(z)* \
            h1f2(n/2.+1.25,n/2.+1.75,2.,1/cosh(z)/cosh(z)));
    } else {
        lq1=pow(tanh(z),n)/pow(cosh(z),m+.5);
        lq1=lq1*h1f2(.5*(m+n+.5),.5*(m+n+1.5),m+1.,1./cosh(z)/cosh(z));
        lq2=pow(tanh(z),n)/pow(cosh(z),m-.5);
        lq2=lq2*h1f2(.5*(m+n-.5),.5*(m+n+.5),m*1.,1./cosh(z)/cosh(z));
        dlq=(m-.5)/tanh(z)*lq1-(2.*m)/sinh(z)*lq2;
    }
    return dlq;
}

/* coordinate transformation r, z -> zeta */
double zetaf(double rho, double z, double r0){
    double zetax;
    zetax=atanh(2.*rho*r0/(rho*rho+r0*r0+z*z));
    return zetax;
}

/* coordinate transformation r, z -> eta */
double etaf(double rho, double z, double r0){
    double etax, xx;
    int i;
    xx=2.*r0*z/(rho*rho-r0*r0+z*z);
    if (fabsl(xx)<0.001) {
        // series expansion of atan near 0 to avoid precision loss
        etax=1.;
        for (i=1;i<=10;i++) {
            etax=etax+pow(xx,2.*i)/(2.*i+1.)*cos(M_PI*i);
        }
        etax=etax*xx;
    } else {
        etax=atanl(xx);
    }
    if (rho<sqrtl(r0*r0-z*z)) etax=etax+M_PI;
    return etax;
}

/* Main work routine: builds per-probe basis values (tcc/tcs/tsc/tss), feeds
   each azimuthal slice to vectorProd on the GPU to accumulate the normal
   matrix d_M, and accumulates the right-hand-side vectors cc/cs/sc/ss on the
   CPU.  On return, `matrix` holds the accumulated normal matrix. */
void cpx(double **datain, double *cc, double *cs, double *sc, double *ss, double **matrix, double *zeta, double *eta, int **ccidx, int ndata, int dim1, double zt0, double rr, int *sliceflag)
{
    double datatmp, zt, et, wgt, phi;
    double *tcc, *tcs, *tsc, *tss;
    tcc=(double *)malloc(sizeof*tcc*(dim1));
    tcs=(double *)malloc(sizeof*tcs*(dim1));
    tsc=(double *)malloc(sizeof*tsc*(dim1));
    tss=(double *)malloc(sizeof*tss*(dim1));
    int md, nd, i, id, prb, j ;
    double legq, dleq;
    cudaError_t err = cudaSuccess;

    // Precompute Legendre tables: Tlegq/Tdleq[i][prb] for basis i, probe prb.
    double **Tlegq;
    double **Tdleq;
    Tlegq=(double **)malloc((dim1+1)*sizeof(double*));
    Tdleq=(double **)malloc((dim1+1)*sizeof(double*));
    for (int il=0;il<=dim1;il++) {
        Tlegq[il]=(double *)malloc(26*sizeof(double));
        Tdleq[il]=(double *)malloc(26*sizeof(double));
    }
    for (prb=1;prb<=25;prb++){ // loop over 25 probes
        zt=zeta[prb]; // zeta coordinate at probe
        for (i=1;i<=dim1;i++){
            md=ccidx[i][1]; // m index
            nd=ccidx[i][2]; // n index
            /* LegendreQ at probe */
            Tlegq[i][prb]=LegendreQ(md,nd,zeta[prb])/LegendreQ(md,nd,zt0);
            /* Derivative of LegendreQ at probe */
            Tdleq[i][prb]=DLegendreQ(md,nd,zeta[prb])/LegendreQ(md,nd,zt0);
        }
    }
    // Table calculation complete

    // Flat host copy of the matrix that the GPU accumulates into.
    double *h_M = NULL;
    int dimM = 4*dim1+2;
    size_t sizeM = dimM*dimM*sizeof(double);
    // FIX: was malloc(sizeM).  h_M is copied to d_M before any kernel runs
    // and vectorProd *accumulates* into d_M, so the matrix must start at
    // exactly zero; with malloc it started from indeterminate heap garbage.
    h_M = (double *)calloc((size_t)dimM*dimM, sizeof(double));

    // device Malloc
    double *d_tcc = NULL;
    double *d_tcs = NULL;
    double *d_tsc = NULL;
    double *d_tss = NULL;
    int d_size = sizeof(double)*dim1;
    double *d_M = NULL;
    err = cudaMalloc((void **)&d_tcc, d_size);
    err = cudaMalloc((void **)&d_tcs, d_size);
    err = cudaMalloc((void **)&d_tsc, d_size);
    err = cudaMalloc((void **)&d_tss, d_size);
    err = cudaMalloc((void **)&d_M, sizeM);
    // copy (zeroed) matrix memory to device
    err = cudaMemcpy(d_M, h_M, sizeM, cudaMemcpyHostToDevice);

    for (id=1;id<=ndata;id++) { // loop over azimuthal slices
        if (id!=0) {
            sliceflag[id]=0;
            datatmp=datain[id][0];
            phi=datatmp*M_PI/180.; // slice azimuth in radians
            for (prb=1;prb<=25;prb++){ // loop over 25 probes
                datatmp=datain[id][prb];
                zt=zeta[prb]; // zeta coordinate at probe
                et=eta[prb];  // eta coordinate at probe
                wgt=sqrt(cosh(zt)-cos(et)); // weight func in toroidal coordinates
                // hoist the repeated hyperbolic/trig evaluations
                double Sinh_zt = sinh(zt);
                double Sin_et = sin(et);
                double Cosh_zt = cosh(zt);
                double Cos_et = cos(et);
                for (i=1;i<=dim1;i++){
                    md=ccidx[i][1]; // m index
                    nd=ccidx[i][2]; // n index
                    /* LegendreQ at probe (from precomputed table) */
                    legq=Tlegq[i][prb];
                    /* Derivative of LegendreQ at probe (from table) */
                    dleq=Tdleq[i][prb];
                    // CC(m,n) coefficient
                    tcc[i-1]=-Sinh_zt*Sin_et/rr*cos(nd*phi)*cos(md*et)* \
                        (Sinh_zt/2./wgt*legq+wgt*dleq) \
                        -(1.-Cosh_zt*Cos_et)/rr* \
                        (-md*cos(nd*phi)*sin(md*et)*wgt*legq \
                        +cos(nd*phi)*cos(md*et)*Sin_et/2./wgt*legq);
                    // CS(m,n) coefficient
                    tcs[i-1]=-Sinh_zt*Sin_et/rr*sin(nd*phi)*cos(md*et)* \
                        (Sinh_zt/2./wgt*legq+wgt*dleq) \
                        -(1.-Cosh_zt*Cos_et)/rr* \
                        (-md*sin(nd*phi)*sin(md*et)*wgt*legq \
                        +sin(nd*phi)*cos(md*et)*Sin_et/2./wgt*legq);
                    // SC(m,n) coefficient
                    tsc[i-1]=-Sinh_zt*Sin_et/rr*cos(nd*phi)*sin(md*et)* \
                        (Sinh_zt/2./wgt*legq+wgt*dleq) \
                        -(1.-Cosh_zt*Cos_et)/rr* \
                        (md*cos(nd*phi)*cos(md*et)*wgt*legq \
                        +cos(nd*phi)*sin(md*et)*Sin_et/2./wgt*legq);
                    // SS(m,n) coefficient
                    tss[i-1]=-Sinh_zt*Sin_et/rr*sin(nd*phi)*sin(md*et)* \
                        (Sinh_zt/2./wgt*legq+wgt*dleq) \
                        -(1.-Cosh_zt*Cos_et)/rr* \
                        (md*sin(nd*phi)*cos(md*et)*wgt*legq \
                        +sin(nd*phi)*sin(md*et)*Sin_et/2./wgt*legq);
                } // define tcc loop end

                // Start parallel computation on gpu: copy basis vectors over
                err = cudaMemcpy(d_tcc, tcc, d_size, cudaMemcpyHostToDevice);
                err = cudaMemcpy(d_tcs, tcs, d_size, cudaMemcpyHostToDevice);
                err = cudaMemcpy(d_tsc, tsc, d_size, cudaMemcpyHostToDevice);
                err = cudaMemcpy(d_tss, tss, d_size, cudaMemcpyHostToDevice);

                dim3 DimBlock (16,16);
                dim3 DimGrid (dim1/16+1, dim1/16+1);
                vectorProd<<<DimGrid, DimBlock>>>(d_tcc, d_tcs, d_tsc, d_tss, d_M, dim1);
                err = cudaGetLastError();
                if (err != cudaSuccess)
                {
                    fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
                    exit(EXIT_FAILURE);
                }

                // Accumulate the right-hand-side vectors on the CPU.
                for (i=1;i<=dim1;i++){
                    cc[i]+=tcc[i-1]*datatmp;
                    cs[i]+=tcs[i-1]*datatmp;
                    sc[i]+=tsc[i-1]*datatmp;
                    ss[i]+=tss[i-1]*datatmp;
                } // define vector loop end
            } // probe loop end
            printf ("\nSlice %d of %d done\n", id, ndata);
        }
    } // id end

    // copy matrix from device to host
    err = cudaMemcpy(h_M, d_M, sizeM, cudaMemcpyDeviceToHost);
    // copy matrix back to main
    for (i=0;i<dimM;i++){
        for (j=0;j<dimM;j++) {
            matrix[i][j]=h_M[i*dimM+j];
        }
    }

    // Free device global memory
    err = cudaFree(d_tcc);
    err = cudaFree(d_tcs);
    err = cudaFree(d_tsc);
    err = cudaFree(d_tss);
    err = cudaFree(d_M);
    free(tcc);
    free(tcs);
    free(tsc);
    free(tss);
    for (int il=0;il<=dim1;il++) {
        free(Tlegq[il]);
        free(Tdleq[il]);
    }
    free(Tlegq);
    free(Tdleq);
    free(h_M);

    // Reset the device and exit
    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    err = cudaDeviceReset();
}
21,266
#include <cstdio>

// Trivial kernel: each launched thread prints a greeting from the device.
__global__ void cuda_hello()
{
    printf("Hello World from GPU!\n");
}

// Queries the CUDA device count, runtime version, and device-0 properties,
// prints a summary banner, then launches cuda_hello on 10 threads.
int main()
{
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    printf("Number of GPUs: %d\n", deviceCount);

    int runtimeVersion;
    cudaRuntimeGetVersion(&runtimeVersion);

    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);

    const int major = runtimeVersion / 1000;
    const int minor = (runtimeVersion % 100) / 10;

    printf("-------------------------------------\n");
    printf("Device name: %s\n", props.name);
    printf("CUDA Runtime Version: %d.%d\n", major, minor);
    printf("Compute capability: %d.%d\n", props.major, props.minor);
    printf("Total global memory: %ld bytes (%lf GiB)\n",
           props.totalGlobalMem, props.totalGlobalMem/1.074e+9);
    printf("-------------------------------------\n");

    // One block of ten threads; wait for the device printf to flush.
    cuda_hello<<<1, 10>>>();
    cudaDeviceSynchronize();
    return 0;
}
21,267
#include "util.cuh"

// Compares two byte buffers of length `len`, memcmp-style but 1-based.
// Returns 0 when the buffers are identical over [0, len).  At the first
// differing index i it returns i+1 when l_dat[i] > r_dat[i], and -(i+1)
// when l_dat[i] < r_dat[i], so the sign gives the ordering and the
// magnitude gives the (1-based) position of the first mismatch.
__device__ int datacmp(const unsigned char *l_dat, const unsigned char *r_dat, uint32_t len)
{
    int match = 0;
    uint32_t i = 0;

    // FIX: removed the `done` flag from the original loop condition -- it was
    // initialised to 0, tested via `!done`, and never assigned again, so it
    // was dead code that only obscured the exit condition.
    while ( i < len && match == 0 ) {
        if ( l_dat[i] != r_dat[i] ) {
            match = i + 1;  // positive: left byte is greater
            if ( (int)l_dat[i] - (int)r_dat[i] < 0 ) {
                match = 0 - (i + 1);  // negative: left byte is smaller
            }
        }
        i++;
    }
    return match;
}
21,268
#include "includes.h"

// Portable kernel-launch macros: the <<< >>> syntax exists only under nvcc,
// so host-only compilers expand these to nothing.
#ifdef __CUDACC__
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif

float a[1024][1024], b[1024][1024], c[1024][1024];

// Now launch your kernel using the appropriate macro:
//kernel KERNEL_ARGS2(dim3(nBlockCount), dim3(nThreadCount)) (param1);

// Naive dense matrix multiplication on the GPU: p = m * d, all n x n,
// row-major, one output element per thread.
__global__ void MMul(float *m, float *d, float *p, int n)
{
    int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
    int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
    if (r >= n || c >= n) return;  // guard: grid may overshoot the matrix

    float p_sum = 0.0f;
    for (int i = 0; i < n; i++) {
        // BUG FIX: original wrote "p_sum = +m[...] * d[...]" (assignment with
        // a unary plus), which kept only the LAST product instead of the
        // accumulated dot product.
        p_sum += m[r * n + i] * d[i * n + c];
    }
    p[r * n + c] = p_sum;
}
21,269
#include "includes.h"

// Matrix transpose via dynamic shared memory, 2 elements per thread
// (unrolled along x), with IPAD padding columns to reduce shared-memory
// bank conflicts.  Assumes blockDim.x == BDIMX and a dynamic shared
// allocation of blockDim.y * (blockDim.x * 2 + IPAD) floats.
__global__ void transposeSmemUnrollPadDyn(float *out, float *in, const int nx, const int ny)
{
    // dynamic shared memory
    extern __shared__ float tile[];

    unsigned int ix = blockDim.x * blockIdx.x * 2 + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
    unsigned int ti = iy * nx + ix;

    unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
    unsigned int irow = bidx / blockDim.y;
    unsigned int icol = bidx % blockDim.y;

    // coordinate in transposed matrix
    unsigned int ix2 = blockDim.y * blockIdx.y + icol;
    unsigned int iy2 = blockDim.x * 2 * blockIdx.x + irow;
    unsigned int to = iy2 * ny + ix2;

    // Same boundary test as before, evaluated once.
    bool inBounds = (ix + blockDim.x < nx) && (iy < ny);

    // Load from global to shared memory (guarded).
    if (inBounds) {
        unsigned int row_idx = threadIdx.y * (blockDim.x * 2 + IPAD) + threadIdx.x;
        tile[row_idx] = in[ti];
        tile[row_idx + BDIMX] = in[ti + BDIMX];
    }

    // FIX: __syncthreads() must be reached by ALL threads of the block.
    // The original placed it inside the boundary check, which is undefined
    // behavior whenever a block straddles the matrix edge and some threads
    // fail the test.  The barrier is now unconditional; only the memory
    // accesses stay guarded.
    __syncthreads();

    // Write the transposed tile back to global memory (guarded as before).
    if (inBounds) {
        unsigned int col_idx = icol * (blockDim.x * 2 + IPAD) + irow;
        out[to] = tile[col_idx];
        out[to + ny * BDIMX] = tile[col_idx + BDIMX];
    }
}
21,270
// Computation of the 2D FFT using the cufftPlan2d() function.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cufft.h>

#define RENGLONES 3
#define COLUMNAS 3

int main()
{
    int i,j;
    cuFloatComplex *h_xn;
    cuFloatComplex *h_Xk;
    cufftComplex *in,*out;

    // Allocate host memory for h_xn (the input samples x[n]).
    h_xn = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*COLUMNAS*RENGLONES);

    // Allocate host memory for h_Xk (the output spectrum X[k]).
    h_Xk = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*COLUMNAS*RENGLONES);

    // Fill x[n] with the values 1..ROWS*COLS (imaginary parts set to 0).
    for(i=0;i<RENGLONES;i++)
    {
        for(j=0;j<COLUMNAS;j++)
        {
            //h_xn[(i*COLUMNAS)+j] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
            h_xn[(i*COLUMNAS)+j] = make_cuFloatComplex((float)(((i*COLUMNAS)+j) + 1),(float)(0.0));
        }
    }

    // Print the input values x[n].
    printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
    for(i=0;i<RENGLONES;i++)
    {
        for(j=0;j<COLUMNAS;j++)
        {
            printf(" (%f) + (%f)",cuCrealf(h_xn[(i*COLUMNAS)+j]),cuCimagf(h_xn[(i*COLUMNAS)+j]));
        }
        printf("\n");
    }

    // Allocate device memory for "in".
    cudaMalloc((void**)&in,sizeof(cufftComplex)*COLUMNAS*RENGLONES);

    // Allocate device memory for "out".
    cudaMalloc((void**)&out,sizeof(cufftComplex)*COLUMNAS*RENGLONES);

    // Copy the data h_xn >>> in (host to device).
    cudaMemcpy(in,h_xn,sizeof(cuFloatComplex)*COLUMNAS*RENGLONES,cudaMemcpyHostToDevice);

    // CUFFT plan: RENGLONES (rows) x COLUMNAS (columns), complex-to-complex.
    cufftHandle plan;
    cufftPlan2d(&plan,RENGLONES,COLUMNAS, CUFFT_C2C);

    // Execute the forward FFT.
    cufftExecC2C(plan,in,out,CUFFT_FORWARD);

    // Copy the data out >>> h_Xk (device to host; the blocking cudaMemcpy
    // also synchronizes with the FFT executed on the default stream).
    cudaMemcpy(h_Xk,out,sizeof(cufftComplex)*RENGLONES*COLUMNAS,cudaMemcpyDeviceToHost);

    // Print the output values X[k].
    printf("\n---ELEMENTOS DE SALIDA X[k]---\n\n");
    for(i=0;i<RENGLONES;i++)
    {
        for(j=0;j<COLUMNAS;j++)
        {
            printf(" (%f) + (%f)",cuCrealf(h_Xk[(i*COLUMNAS)+j]),cuCimagf(h_Xk[(i*COLUMNAS)+j]));
        }
        printf("\n");
    }

    // Destroy the plan.
    cufftDestroy(plan);

    // Free host and device memory.
    free(h_xn);
    free(h_Xk);
    cudaFree(in);
    cudaFree(out);
}
21,271
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Host-side source data and its device-resident counterpart.
int A[5] = { 1, 2, 3, 4, 5 };
__device__ int d_A[5];

// Doubles each element of the device symbol array; one thread per element.
__global__ void multiply()
{
    int i = threadIdx.x;
    d_A[i] *= 2;
}

// Copies A into the device symbol, doubles it on the GPU, copies the result
// back, and prints it.
int main()
{
    cudaMemcpyToSymbol(d_A, A, 5 * sizeof(int));
    multiply<<<1, 5>>>();
    // cudaMemcpyFromSymbol blocks until the kernel has finished.
    cudaMemcpyFromSymbol(A, d_A, 5 * sizeof(int));

    for (int k = 0; k < 5; k++)
        printf("%d ", A[k]);
}
21,272
#include <stdio.h>
#include <iostream>
#include <vector>
#include <time.h>
#include <math.h>

// Report (but do not abort on) CUDA API errors.
#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of cudaError_t error */ \
  do { \
    cudaError_t error = condition; \
    if (error != cudaSuccess) { \
      std::cout << cudaGetErrorString(error) << std::endl; \
    } \
  } while (0)

// Grid-stride loop over n work items (decouples correctness from launch size).
#define CUDA_1D_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)

# define M_PI 3.14159265358979323846

const int block_num = 512;
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// 64 threads per block (sizeof(unsigned long long) * 8 bits).
const int threadsPerBlock = sizeof(unsigned long long) * 8;

// Tests whether point (x,y,z) lies inside a yaw-rotated 3D box described by
// center-x cx, bottom-y by, center-z cz, dimensions (l,h,w) and heading ry.
// max_distance is a cheap XZ-plane rejection radius precomputed by the caller.
__device__ static int point_inside_box_3d(float x, float y, float z, float cx, float by, float cz, float l, float h, float w, float ry, float max_distance){
    float cos_ry, sin_ry;
    float canonical_x, canonical_z;
    int inside;
    // Fast reject: outside the XZ bounding circle, or outside [by - h, by] in Y
    // (by is the box bottom; y grows toward it).
    if ((fabsf(x - cx) > max_distance) || (y > by) || ((by - y) > h) || (fabsf(z - cz) > max_distance)){
        return 0;
    }
    cos_ry = cos(ry);
    sin_ry = sin(ry);
    // Rotate the point into the box's canonical (axis-aligned) frame.
    canonical_x = (x - cx) * cos_ry - (z - cz) * sin_ry;
    canonical_z = (x - cx) * sin_ry + (z - cz) * cos_ry;
    inside = (canonical_x >= -l / 2.0) & (canonical_x <= l / 2.0) & (canonical_z >= -w / 2.0) & (canonical_z <= w / 2.0);
    return inside;
}

/* query boxes 3d points */
// For each proposal box, gather up to nsample indices of points inside it.
// input: nsample (1), xyz (b,n,3), proposals (b,m,7)
// output: idx (b,m,nsample), pts_cnt (b,m)
__global__ void query_boxes_3d_points_gpu(int b, int n, int m, int nsample, const float *xyz, const float *proposals, int *idx, int *pts_cnt) {
    int total_idx = b * m;
    CUDA_1D_KERNEL_LOOP(point_inds, total_idx){
        int batch_index = point_inds / m;
        const float* cur_xyz;
        const float* cur_proposal;
        cur_xyz = xyz + n*3*batch_index;
        cur_proposal = proposals + point_inds * 7;

        int* cur_idx;
        int* cur_pts_cnt;
        cur_idx = idx + nsample*point_inds;
        cur_pts_cnt = pts_cnt + point_inds; // counting how many unique points selected in local region

        // Proposal layout: [cx, by, cz, l, h, w, ry].
        float cx= cur_proposal[0];
        float by= cur_proposal[1];
        float cz= cur_proposal[2];
        float l = cur_proposal[3];
        float h = cur_proposal[4];
        float w = cur_proposal[5];
        float ry= cur_proposal[6];
        // Half-diagonal of the box footprint, clamped away from zero.
        float max_distance = max(sqrtf((l / 2.) * (l / 2.)+(w / 2.)*(w / 2.)),1e-20f);

        float x, y, z;
        int inside;
        int cnt = 0;
        for (int k=0;k<n;++k) {
            if (cnt == nsample) break; // only pick the FIRST nsample points in the ball
            x=cur_xyz[k*3+0];
            y=cur_xyz[k*3+1];
            z=cur_xyz[k*3+2];
            inside = point_inside_box_3d(x, y, z, cx, by, cz, l, h, w, ry, max_distance);
            if (inside) {
                if (cnt==0) {
                    // Pre-fill every slot with k so short lists still contain
                    // valid (repeating) indices.
                    for (int l=0;l<nsample;++l) cur_idx[l] = k;
                }
                cur_idx[cnt] = k;
                cnt+=1;
            }
        }
        cur_pts_cnt[0] = cnt;
    }
}

/* query boxes 3d mask */
// One 0/1 flag per (box, point) pair: is the point inside the box.
// input: xyz (b,n,3), boxes_3d (b,m,7)
// output: mask (b,m,n)
__global__ void query_boxes_3d_mask_gpu(int b, int n, int m, const float *xyz, const float *boxes_3d, int *mask){
    int total_idx = b * m * n;
    CUDA_1D_KERNEL_LOOP(point_inds, total_idx){
        int batch_index = point_inds / (m * n);
        int box_index = point_inds / n;   // flattened (batch, box) index
        int point_index = point_inds % n;
        const float* cur_xyz;
        const float* cur_boxes_3d;
        cur_xyz = xyz + batch_index * n * 3 + point_index * 3;
        cur_boxes_3d = boxes_3d + box_index * 7;
        int* cur_mask;
        cur_mask = mask + point_inds;

        float cx= cur_boxes_3d[0];
        float by= cur_boxes_3d[1];
        float cz= cur_boxes_3d[2];
        float l = cur_boxes_3d[3];
        float h = cur_boxes_3d[4];
        float w = cur_boxes_3d[5];
        float ry= cur_boxes_3d[6];
        float max_distance = max(sqrtf((l / 2.) * (l / 2.)+(w / 2.)*(w / 2.)),1e-20f);

        float x = cur_xyz[0];
        float y = cur_xyz[1];
        float z = cur_xyz[2];
        int inside;
        inside = point_inside_box_3d(x, y, z, cx, by, cz, l, h, w, ry, max_distance);
        cur_mask[0] = inside;
    }
}

/* query points iou */
// Point-based IoU between anchors and gt boxes: |points in both| / |points in either|.
// input: xyz (b,n,3), anchors_3d (b,anchors_num,7), gt_boxes_3d (b, gt_num, 7)
// input: iou_matrix (b, anchors_num, gt_num)
// output: iou_points(b, anchors_num, gt_num)
__global__ void query_points_iou_gpu(int b, int n, int anchors_num, int gt_num, const float* xyz, const float* anchors_3d, const float* gt_boxes_3d, const float* iou_matrix, float* iou_points){
    int total_idx = b * anchors_num * gt_num;
    CUDA_1D_KERNEL_LOOP(point_inds, total_idx){
        float iou_value = iou_matrix[point_inds];
        if (iou_value < 1e-3f){
            // if no overlaps around two boxes_3d, then directly return 0
            iou_points[point_inds] = 0.;
            continue;
        }
        // has overlaps, then calculate PointIoU
        int batch_index = point_inds / (anchors_num * gt_num);
        int anchor_index = point_inds / gt_num;  // flattened (batch, anchor)
        int gt_index = point_inds % gt_num;

        const float* cur_xyz;
        const float* cur_anchors_3d;
        const float* cur_gt_boxes_3d;
        cur_xyz = xyz + batch_index * n * 3;
        cur_anchors_3d = anchors_3d + anchor_index * 7;
        cur_gt_boxes_3d = gt_boxes_3d + batch_index * gt_num * 7 + gt_index * 7;

        float* cur_iou_points;
        cur_iou_points = iou_points + point_inds;

        // in = points inside both boxes, un = points inside at least one.
        int in = 0, un = 0;
        float gt_boxes_cx= cur_gt_boxes_3d[0];
        float gt_boxes_by= cur_gt_boxes_3d[1];
        float gt_boxes_cz= cur_gt_boxes_3d[2];
        float gt_boxes_l = cur_gt_boxes_3d[3];
        float gt_boxes_h = cur_gt_boxes_3d[4];
        float gt_boxes_w = cur_gt_boxes_3d[5];
        float gt_boxes_ry= cur_gt_boxes_3d[6];
        float gt_boxes_max_distance = max(sqrtf((gt_boxes_l / 2.) * (gt_boxes_l / 2.) + (gt_boxes_w / 2.) * (gt_boxes_w / 2.)),1e-20f);

        float anchors_cx= cur_anchors_3d[0];
        float anchors_by= cur_anchors_3d[1];
        float anchors_cz= cur_anchors_3d[2];
        float anchors_l = cur_anchors_3d[3];
        float anchors_h = cur_anchors_3d[4];
        float anchors_w = cur_anchors_3d[5];
        float anchors_ry= cur_anchors_3d[6];
        float anchors_max_distance = max(sqrtf((anchors_l / 2.) * (anchors_l / 2.) + (anchors_w / 2.) * (anchors_w / 2.)),1e-20f);

        float x, y, z;
        int inside_anchors, inside_gt;
        for (int k=0;k<n;++k) {
            x=cur_xyz[k*3+0];
            y=cur_xyz[k*3+1];
            z=cur_xyz[k*3+2];
            inside_anchors = point_inside_box_3d(x, y, z, anchors_cx, anchors_by, anchors_cz, anchors_l, anchors_h, anchors_w, anchors_ry, anchors_max_distance);
            inside_gt = point_inside_box_3d(x, y, z, gt_boxes_cx, gt_boxes_by, gt_boxes_cz, gt_boxes_l, gt_boxes_h, gt_boxes_w, gt_boxes_ry, gt_boxes_max_distance);
            un += (inside_gt | inside_anchors);
            in += (inside_gt & inside_anchors);
        }
        un = max(un, 1); // avoid division by zero
        cur_iou_points[0] = float(in) / float(un);
    }
}

// Ball query: for each query point, collect up to nsample reference points
// closer than radius.
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
    int total_idx = b * m;
    CUDA_1D_KERNEL_LOOP(point_inds, total_idx){
        int batch_index = point_inds / m;
        const float* cur_xyz1;
        const float* cur_xyz2;
        cur_xyz1 = xyz1 + n*3*batch_index;
        cur_xyz2 = xyz2 + point_inds * 3;
        int* cur_idx;
        int* cur_pts_cnt;
        cur_idx = idx + nsample*point_inds;
        cur_pts_cnt = pts_cnt + point_inds; // counting how many unique points selected in local region

        float x2=cur_xyz2[0];
        float y2=cur_xyz2[1];
        float z2=cur_xyz2[2];
        float x1, y1, z1, d;
        int cnt = 0;
        for (int k=0;k<n;++k) {
            if (cnt == nsample) break; // only pick the FIRST nsample points in the ball
            x1=cur_xyz1[k*3+0];
            y1=cur_xyz1[k*3+1];
            z1=cur_xyz1[k*3+2];
            d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
            if (d<radius) {
                if (cnt==0) {
                    // set ALL indices to k, s.t. if there are less points in
                    // ball than nsample, we still have valid (repeating) indices
                    for (int l=0;l<nsample;++l) cur_idx[l] = k;
                }
                cur_idx[cnt] = k;
                cnt+=1;
            }
        }
        cur_pts_cnt[0] = cnt;
    }
}

// Ball query that visits candidates in a caller-provided order (sort_idx),
// so the FIRST nsample hits follow that ordering.
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3), sort_idx (b, m, n)
// output: idx (b,m,nsample), pts_cnt (b,m)
__global__ void query_ball_point_withidx_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, const int* sort_idx, int *idx, int *pts_cnt) {
    int total_idx = b * m;
    CUDA_1D_KERNEL_LOOP(point_inds, total_idx){
        int batch_index = point_inds / m;
        const float* cur_xyz1;
        const float* cur_xyz2;
        const int* cur_sort_idx;
        cur_xyz1 = xyz1 + n*3*batch_index;
        cur_xyz2 = xyz2 + point_inds * 3;
        cur_sort_idx = sort_idx + point_inds * n;
        int* cur_idx;
        int* cur_pts_cnt;
        cur_idx = idx + nsample*point_inds;
        cur_pts_cnt = pts_cnt + point_inds; // counting how many unique points selected in local region

        float x2=cur_xyz2[0];
        float y2=cur_xyz2[1];
        float z2=cur_xyz2[2];
        float x1, y1, z1, d;
        int cnt = 0;
        int k;
        for (int i=0;i<n;++i) {
            if (cnt == nsample) break; // only pick the FIRST nsample points in the ball
            k = cur_sort_idx[i]; // candidate order dictated by sort_idx
            x1=cur_xyz1[k*3+0];
            y1=cur_xyz1[k*3+1];
            z1=cur_xyz1[k*3+2];
            d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
            if (d<radius) {
                if (cnt==0) {
                    // set ALL indices to k, s.t. if there are less points in
                    // ball than nsample, we still have valid (repeating) indices
                    for (int l=0;l<nsample;++l) cur_idx[l] = k;
                }
                cur_idx[cnt] = k;
                cnt+=1;
            }
        }
        cur_pts_cnt[0] = cnt;
    }
}

// Dilated ball query: accepts points in the annulus [min_radius, max_radius),
// plus the point coincident with the query itself (d == 0).
// input: min_radius (1), max_radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
__global__ void query_ball_point_dilated_gpu(int b, int n, int m, float min_radius, float max_radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
    int total_idx = b * m;
    CUDA_1D_KERNEL_LOOP(point_inds, total_idx){
        int batch_index = point_inds / m;
        const float* cur_xyz1;
        const float* cur_xyz2;
        cur_xyz1 = xyz1 + n*3*batch_index;
        cur_xyz2 = xyz2 + point_inds * 3;
        int* cur_idx;
        int* cur_pts_cnt;
        cur_idx = idx + nsample*point_inds;
        cur_pts_cnt = pts_cnt + point_inds; // counting how many unique points selected in local region

        float x2=cur_xyz2[0];
        float y2=cur_xyz2[1];
        float z2=cur_xyz2[2];
        float x1, y1, z1, d;
        int cnt = 0;
        for (int k=0;k<n;++k) {
            if (cnt == nsample) break; // only pick the FIRST nsample points in the ball
            x1=cur_xyz1[k*3+0];
            y1=cur_xyz1[k*3+1];
            z1=cur_xyz1[k*3+2];
            d=sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
            if (d == 0){
                // x2, y2, z2: set all indices to k
                if (cnt == 0){
                    for (int l=0;l<nsample;++l) cur_idx[l] = k;
                }
                cur_idx[cnt] = k;
                cnt += 1;
            }
            else if (d >= min_radius && d < max_radius) {
                if (cnt==0) {
                    // set ALL indices to k, s.t. if there are less points in
                    // ball than nsample, we still have valid (repeating) indices
                    for (int l=0;l<nsample;++l) cur_idx[l] = k;
                }
                cur_idx[cnt] = k;
                cnt+=1;
            }
        }
        cur_pts_cnt[0] = cnt;
    }
}

// Gather features by index; idx == -1 produces a zero feature.
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
    int total_idx = b * m * nsample * c;
    CUDA_1D_KERNEL_LOOP(point_inds, total_idx){
        int batch_inds = point_inds / (m * nsample * c);
        int idx_inds = point_inds / c;   // flattened (b, m, nsample) index
        int cur_channel = point_inds % c;
        const float* cur_points = points + batch_inds * n * c;
        int cur_idx = idx[idx_inds];
        float *cur_out = out + point_inds;
        if (cur_idx == -1){
            cur_out[0] = float(0);
        }
        else{
            cur_out[0] = cur_points[cur_idx * c + cur_channel];
        }
    }
}

// Backward pass of group_point: scatter-add gradients to the source points.
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
    int total_idx = b * m * nsample * c;
    CUDA_1D_KERNEL_LOOP(point_inds, total_idx){
        int batch_index = point_inds / (m * nsample * c);
        int idx_inds = point_inds / c;
        int cur_channel = point_inds % c;
        const float* cur_grad_out = grad_out + point_inds;
        int cur_idx = idx[idx_inds];
        float* cur_grad_points = grad_points + batch_index * n * c;
        if (cur_idx != -1){
            // atomicAdd: several samples may map to the same source point.
            atomicAdd(&cur_grad_points[cur_idx * c + cur_channel], cur_grad_out[0]);
        }
    }
}

// Partial selection sort: sorts (ascending) only the first k positions of
// each row of the distance matrix, carrying the index array along.
// input: k (1), distance matrix dist (b,m,n)
// output: idx (b,m,n), dist_out (b,m,n)
// only the top k results within n are useful
// Launch: one block per batch element; threads stride over the m rows.
__global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
    int batch_index = blockIdx.x;
    dist+=m*n*batch_index;
    outi+=m*n*batch_index;
    out+=m*n*batch_index;

    int index = threadIdx.x;
    int stride = blockDim.x;

    // copy from dist to dist_out
    for (int j=index;j<m;j+=stride) {
        for (int s=0;s<n;++s) {
            out[j*n+s] = dist[j*n+s];
            outi[j*n+s] = s;
        }
    }

    float *p_dist;
    for (int j=index;j<m;j+=stride) {
        p_dist = out+j*n;
        // selection sort for the first k elements
        for (int s=0;s<k;++s) {
            int min=s;
            // find the min
            for (int t=s+1;t<n;++t) {
                if (p_dist[t]<p_dist[min]) {
                    min = t;
                }
            }
            // swap min-th and i-th element
            if (min!=s) {
                float tmp = p_dist[min];
                p_dist[min] = p_dist[s];
                p_dist[s] = tmp;
                int tmpi = outi[j*n+min];
                outi[j*n+min] = outi[j*n+s];
                outi[j*n+s] = tmpi;
            }
        }
    }
}

// ---- Host-side launch wrappers (fixed grid: block_num x threadsPerBlock,
// except selection sort which uses one block per batch element). ----

void queryBoxes3dPointsLauncher(int b, int n, int m, int nsample, const float *xyz, const float *proposals, int *idx, int *pts_cnt){
    query_boxes_3d_points_gpu<<<block_num, threadsPerBlock>>>(b,n,m,nsample,xyz,proposals,idx,pts_cnt);
}
void queryBoxes3dMaskLauncher(int b, int n, int m, const float *xyz, const float *boxes_3d, int *mask){
    query_boxes_3d_mask_gpu<<<block_num, threadsPerBlock>>>(b,n,m,xyz,boxes_3d,mask);
}
void queryPointsIouLauncher(int b, int n, int anchors_num, int gt_num, const float* xyz, const float* anchors_3d, const float* gt_boxes_3d, const float* iou_matrix, float* iou_points){
    query_points_iou_gpu<<<block_num, threadsPerBlock>>>(b,n,anchors_num,gt_num, xyz, anchors_3d, gt_boxes_3d, iou_matrix, iou_points);
}
void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
    query_ball_point_gpu<<<block_num,threadsPerBlock>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
    //cudaDeviceSynchronize();
}
void queryBallPointDilatedLauncher(int b, int n, int m, float min_radius, float max_radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
    query_ball_point_dilated_gpu<<<block_num,threadsPerBlock>>>(b,n,m,min_radius,max_radius,nsample,xyz1,xyz2,idx,pts_cnt);
    //cudaDeviceSynchronize();
}
void queryBallPointWithidxLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, const int* sort_idx, int *idx, int *pts_cnt){
    query_ball_point_withidx_gpu<<<block_num,threadsPerBlock>>>(b,n,m,radius,nsample,xyz1,xyz2,sort_idx,idx,pts_cnt);
}
void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
    selection_sort_gpu<<<b,256>>>(b,n,m,k,dist,outi,out);
    //cudaDeviceSynchronize();
}
void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){
    group_point_gpu<<<block_num,threadsPerBlock>>>(b,n,c,m,nsample,points,idx,out);
    //cudaDeviceSynchronize();
}
void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){
    group_point_grad_gpu<<<block_num,threadsPerBlock>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
    //group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
    //cudaDeviceSynchronize();
}
21,273
#include "includes.h"

// Ball query: for each of the m query points (xyz2) collect up to nsample
// indices of the n reference points (xyz1) lying within radius[0].
// input:  radius (device scalar), nsample, xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
// Launch: one block per batch element; threads stride over the m queries.
__global__ void query_ball_point_gpu(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
    int batch_index = blockIdx.x;
    xyz1 += n*3*batch_index;
    xyz2 += m*3*batch_index;
    idx += m*nsample*batch_index;
    pts_cnt += m*batch_index; // counting how many unique points selected in local region

    // PERF FIX: the search radius is loop-invariant — the original re-read
    // radius[0] from global memory for every candidate point.
    float r = radius[0];

    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int j=index;j<m;j+=stride) {
        // PERF FIX: the query point's coordinates depend only on j; the
        // original reloaded them from global memory on every k iteration.
        float x2=xyz2[j*3+0];
        float y2=xyz2[j*3+1];
        float z2=xyz2[j*3+2];

        int cnt = 0;
        for (int k=0;k<n;++k) {
            if (cnt == nsample) break; // only pick the FIRST nsample points in the ball
            float x1=xyz1[k*3+0];
            float y1=xyz1[k*3+1];
            float z1=xyz1[k*3+2];
            float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
            if (d<r) {
                if (cnt==0) {
                    // set ALL indices to k, s.t. if there are less points in
                    // ball than nsample, we still have valid (repeating) indices
                    for (int l=0;l<nsample;++l) idx[j*nsample+l] = k;
                }
                idx[j*nsample+cnt] = k;
                cnt+=1;
            }
        }
        pts_cnt[j] = cnt;
    }
}
21,274
#include <stdio.h>

// Adds two integers on the device: *c = *a + *b.
__global__ void add(int *a, int *b, int *c)
{
    *c = *a + *b;
}

// Demonstrates unified (managed) memory: the same pointers are usable from
// host and device, so no explicit cudaMemcpy calls are needed.
int main(void)
{
    int size = sizeof(int);
    int *d_a = NULL;  // device copies of a, b, c
    int *d_b = NULL;
    int *d_c = NULL;

    // Allocate managed memory, visible to both CPU and GPU.
    cudaMallocManaged(&d_a, size);
    cudaMallocManaged(&d_b, size);
    cudaMallocManaged(&d_c, size);

    // Set up the input values directly from the host.
    *d_a = 2;
    *d_b = 7;
    *d_c = 0;

    // Launch the add() kernel and wait for it before reading the result.
    add<<<1, 1>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();

    printf("d_c = %d\n", *d_c);

    // Release the managed allocations.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
21,275
#include <iostream>

// __global__ marks a function that runs on the GPU; it is compiled by nvcc
// while the rest of the translation unit is handled by the host compiler.
// Computes *c = *a + *b.
__global__ void kernel(int *a, int *b, int *c)
{
    *c = *a + *b;
}

int main(int argc, char const *argv[])
{
    const int size = sizeof(int);

    // Host operands and result.
    int a = 2;
    int b = 7;
    int c = 0;

    // Device-side buffers.
    int *device_a, *device_b, *device_c;
    cudaMalloc((void **)&device_a, size);
    cudaMalloc((void **)&device_b, size);
    cudaMalloc((void **)&device_c, size);

    // Stage the operands on the GPU.
    cudaMemcpy(device_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, &b, size, cudaMemcpyHostToDevice);

    // <<<blocks, threads>>> — a single thread suffices for one addition.
    kernel<<<1, 1>>>(device_a, device_b, device_c);

    // Fetch the result; the blocking copy also waits for the kernel.
    cudaMemcpy(&c, device_c, size, cudaMemcpyDeviceToHost);
    std::cout << c << '\n';

    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    return 0;
}
21,276
/* Matt Dean - 1422434 - mxd434

Goals implemented:
 - Block scan for arbitrary length small vectors - 'blockscan' function
 - Full scan for arbitrary length large vectors - 'scan' function
   This function decides whether to perform a small (one block) scan or a
   full (n-level) scan depending on the length of the input vector
 - BCAO for both scans

Hardware:
 CPU - Intel Core i5-4670k @ 3.4GHz
 GPU - NVIDIA GeForce GTX 760

Timings:
 10,000,000 Elements
  host     : 20749 ms
  gpu      : 7.860768 ms
  gpu bcao : 4.304064 ms
 For more results please see the comment at the bottom of this file

Extra work:
 Due to the recursive nature of the full scan it can handle n > 3 levels
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"

// scan.cuh — host-side scan entry points and helpers.
long sequential_scan(int* output, int* input, int length);
float blockscan(int *output, int *input, int length, bool bcao);
float scan(int *output, int *input, int length, bool bcao);
void scanLargeDeviceArray(int *output, int *input, int length, bool bcao);
void scanSmallDeviceArray(int *d_out, int *d_in, int length, bool bcao);
void scanLargeEvenDeviceArray(int *output, int *input, int length, bool bcao);

// kernels.cuh — device kernels ("bcao" = bank-conflict-avoidance optimisation).
__global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo);
__global__ void prescan_arbitrary_unoptimized(int *output, int *input, int n, int powerOfTwo);
__global__ void prescan_large(int *output, int *input, int n, int* sums);
__global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums);
__global__ void add(int *output, int length, int *n1);
__global__ void add(int *output, int length, int *n1, int *n2);

// utils.h
void _checkCudaError(const char *message, cudaError_t err, const char *caller);
void printResult(const char* prefix, int result, long nanoseconds);
void printResult(const char* prefix, int result, float milliseconds);
bool isPowerOfTwo(int x);
int nextPowerOfTwo(int x);
long get_nanos();

/*///////////////////////////////////*/
/*             Main.cpp              */
/*///////////////////////////////////*/

// Runs CPU and GPU prescans over a random vector of length N and prints the
// final scanned element plus the elapsed time of each variant.
void test(int N) {
    // Single-block scans only work up to 1024 elements (2 per thread).
    bool canBeBlockscanned = N <= 1024;

    time_t t;
    srand((unsigned)time(&t));
    int *in = new int[N];
    for (int i = 0; i < N; i++) {
        in[i] = rand() % 10;
    }

    printf("%i Elements \n", N);

    // sequential scan on CPU
    int *outHost = new int[N]();
    long time_host = sequential_scan(outHost, in, N);
    printResult("host ", outHost[N - 1], time_host);

    // full scan
    int *outGPU = new int[N]();
    float time_gpu = scan(outGPU, in, N, false);
    printResult("gpu ", outGPU[N - 1], time_gpu);

    // full scan with BCAO
    int *outGPU_bcao = new int[N]();
    float time_gpu_bcao = scan(outGPU_bcao, in, N, true);
    printResult("gpu bcao", outGPU_bcao[N - 1], time_gpu_bcao);

    if (canBeBlockscanned) {
        // basic level 1 block scan
        int *out_1block = new int[N]();
        float time_1block = blockscan(out_1block, in, N, false);
        printResult("level 1 ", out_1block[N - 1], time_1block);

        // level 1 block scan with BCAO
        int *out_1block_bcao = new int[N]();
        float time_1block_bcao = blockscan(out_1block_bcao, in, N, true);
        printResult("l1 bcao ", out_1block_bcao[N - 1], time_1block_bcao);

        delete[] out_1block;
        delete[] out_1block_bcao;
    }

    printf("\n");

    delete[] in;
    delete[] outHost;
    delete[] outGPU;
    delete[] outGPU_bcao;
}

int main() {
    int TEN_MILLION = 10000000;
    int ONE_MILLION = 1000000;
    int TEN_THOUSAND = 10000;

    // Mix of large, even, odd, power-of-two and tiny sizes.
    int elements[] = { TEN_MILLION * 2, TEN_MILLION, ONE_MILLION, TEN_THOUSAND, 5000, 4096, 2048, 2000, 1000, 500, 100, 64, 8, 5 };
    int numElements = sizeof(elements) / sizeof(elements[0]);

    for (int i = 0; i < numElements; i++) {
        test(elements[i]);
    }

    return 0;
}

/*///////////////////////////////////*/
/*             scan.cu               */
/*///////////////////////////////////*/

#define checkCudaError(o, l) _checkCudaError(o, l, __func__)

int THREADS_PER_BLOCK = 512;
int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2; // each thread handles 2 elements

// CPU reference exclusive prescan; returns elapsed time in nanoseconds.
long sequential_scan(int* output, int* input, int length) {
    long start_time = get_nanos();

    output[0] = 0; // since this is a prescan, not a scan
    for (int j = 1; j < length; ++j) {
        output[j] = input[j - 1] + output[j - 1];
    }

    long end_time = get_nanos();
    return end_time - start_time;
}

// Single-block GPU prescan (length <= 1024); returns kernel time in ms.
// bcao selects the bank-conflict-avoidance-optimised kernel.
float blockscan(int *output, int *input, int length, bool bcao) {
    int *d_out, *d_in;
    const int arraySize = length * sizeof(int);

    cudaMalloc((void **)&d_out, arraySize);
    cudaMalloc((void **)&d_in, arraySize);
    cudaMemcpy(d_out, output, arraySize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_in, input, arraySize, cudaMemcpyHostToDevice);

    // start timer
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    int powerOfTwo = nextPowerOfTwo(length);
    if (bcao) {
        prescan_arbitrary<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int)>>>(d_out, d_in, length, powerOfTwo);
    }
    else {
        prescan_arbitrary_unoptimized<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int)>>>(d_out, d_in, length, powerOfTwo);
    }

    // end timer
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float elapsedTime = 0;
    cudaEventElapsedTime(&elapsedTime, start, stop);

    cudaMemcpy(output, d_out, arraySize, cudaMemcpyDeviceToHost);

    cudaFree(d_out);
    cudaFree(d_in);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return elapsedTime;
}

// Full GPU prescan for any length: dispatches to the single-block path or
// the multi-level large-array path.  Returns device time in ms.
float scan(int *output, int *input, int length, bool bcao) {
    int *d_out, *d_in;
    const int arraySize = length * sizeof(int);

    cudaMalloc((void **)&d_out, arraySize);
    cudaMalloc((void **)&d_in, arraySize);
    cudaMemcpy(d_out, output, arraySize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_in, input, arraySize, cudaMemcpyHostToDevice);

    // start timer
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    if (length > ELEMENTS_PER_BLOCK) {
        scanLargeDeviceArray(d_out, d_in, length, bcao);
    }
    else {
        scanSmallDeviceArray(d_out, d_in, length, bcao);
    }

    // end timer
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float elapsedTime = 0;
    cudaEventElapsedTime(&elapsedTime, start, stop);

    cudaMemcpy(output, d_out, arraySize, cudaMemcpyDeviceToHost);

    cudaFree(d_out);
    cudaFree(d_in);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return elapsedTime;
}

// Scans an arbitrary-length device array, splitting off a tail when length
// is not a multiple of ELEMENTS_PER_BLOCK.
void scanLargeDeviceArray(int *d_out, int *d_in, int length, bool bcao) {
    int remainder = length % (ELEMENTS_PER_BLOCK);
    if (remainder == 0) {
        scanLargeEvenDeviceArray(d_out, d_in, length, bcao);
    }
    else {
        // perform a large scan on a compatible multiple of elements
        int lengthMultiple = length - remainder;
        scanLargeEvenDeviceArray(d_out, d_in, lengthMultiple, bcao);

        // scan the remaining elements and add the (inclusive) last element of the large scan to this
        int *startOfOutputArray = &(d_out[lengthMultiple]);
        scanSmallDeviceArray(startOfOutputArray, &(d_in[lengthMultiple]), remainder, bcao);

        add<<<1, remainder>>>(startOfOutputArray, remainder, &(d_in[lengthMultiple - 1]), &(d_out[lengthMultiple - 1]));
    }
}

// One-block scan of a short device array (length <= ELEMENTS_PER_BLOCK).
void scanSmallDeviceArray(int *d_out, int *d_in, int length, bool bcao) {
    int powerOfTwo = nextPowerOfTwo(length);

    if (bcao) {
        prescan_arbitrary<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int)>>>(d_out, d_in, length, powerOfTwo);
    }
    else {
        prescan_arbitrary_unoptimized<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int)>>>(d_out, d_in, length, powerOfTwo);
    }
}

// Multi-block scan when length is an exact multiple of ELEMENTS_PER_BLOCK:
// per-block scans produce per-block sums, which are scanned (recursively if
// needed) and added back to every block's output.
void scanLargeEvenDeviceArray(int *d_out, int *d_in, int length, bool bcao) {
    const int blocks = length / ELEMENTS_PER_BLOCK;
    const int sharedMemArraySize = ELEMENTS_PER_BLOCK * sizeof(int);

    int *d_sums, *d_incr;
    cudaMalloc((void **)&d_sums, blocks * sizeof(int));
    cudaMalloc((void **)&d_incr, blocks * sizeof(int));

    if (bcao) {
        prescan_large<<<blocks, THREADS_PER_BLOCK, 2 * sharedMemArraySize>>>(d_out, d_in, ELEMENTS_PER_BLOCK, d_sums);
    }
    else {
        prescan_large_unoptimized<<<blocks, THREADS_PER_BLOCK, 2 * sharedMemArraySize>>>(d_out, d_in, ELEMENTS_PER_BLOCK, d_sums);
    }

    const int sumsArrThreadsNeeded = (blocks + 1) / 2;
    if (sumsArrThreadsNeeded > THREADS_PER_BLOCK) {
        // perform a large scan on the sums arr
        scanLargeDeviceArray(d_incr, d_sums, blocks, bcao);
    }
    else {
        // only need one block to scan sums arr so can use small scan
        scanSmallDeviceArray(d_incr, d_sums, blocks, bcao);
    }

    add<<<blocks, ELEMENTS_PER_BLOCK>>>(d_out, ELEMENTS_PER_BLOCK, d_incr);

    cudaFree(d_sums);
    cudaFree(d_incr);
}

/*///////////////////////////////////*/
/*            kernels.cu             */
/*///////////////////////////////////*/

#define SHARED_MEMORY_BANKS 32
#define LOG_MEM_BANKS 5

// There were two BCAO optimisations in the paper - this one is fastest
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_MEM_BANKS)

// Blelloch work-efficient exclusive prescan of one block (2 elements per
// thread) with bank-conflict-avoidance padding; n may be a non-power-of-two
// (powerOfTwo = nextPowerOfTwo(n), shared memory zero-padded up to it).
__global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo)
{
    extern __shared__ int temp[];// allocated on invocation
    int threadID = threadIdx.x;

    int ai = threadID;
    int bi = threadID + (n / 2);
    int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    int bankOffsetB = CONFLICT_FREE_OFFSET(bi);

    if (threadID < n) {
        temp[ai + bankOffsetA] = input[ai];
        temp[bi + bankOffsetB] = input[bi];
    }
    else {
        temp[ai + bankOffsetA] = 0;
        temp[bi + bankOffsetB] = 0;
    }

    int offset = 1;
    for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree
    {
        __syncthreads();
        if (threadID < d) {
            int ai = offset * (2 * threadID + 1) - 1;
            int bi = offset * (2 * threadID + 2) - 1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }

    if (threadID == 0) {
        temp[powerOfTwo - 1 + CONFLICT_FREE_OFFSET(powerOfTwo - 1)] = 0; // clear the last element
    }

    for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan
    {
        offset >>= 1;
        __syncthreads();
        if (threadID < d) {
            int ai = offset * (2 * threadID + 1) - 1;
            int bi = offset * (2 * threadID + 2) - 1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            int t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();

    if (threadID < n) {
        output[ai] = temp[ai + bankOffsetA];
        output[bi] = temp[bi + bankOffsetB];
    }
}

// Same algorithm as prescan_arbitrary but without bank-conflict padding.
__global__ void prescan_arbitrary_unoptimized(int *output, int *input, int n, int powerOfTwo) {
    extern __shared__ int temp[];// allocated on invocation
    int threadID = threadIdx.x;

    if (threadID < n) {
        temp[2 * threadID] = input[2 * threadID]; // load input into shared memory
        temp[2 * threadID + 1] = input[2 * threadID + 1];
    }
    else {
        temp[2 * threadID] = 0;
        temp[2 * threadID + 1] = 0;
    }

    int offset = 1;
    for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree
    {
        __syncthreads();
        if (threadID < d) {
            int ai = offset * (2 * threadID + 1) - 1;
            int bi = offset * (2 * threadID + 2) - 1;
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }

    if (threadID == 0) { temp[powerOfTwo - 1] = 0; } // clear the last element

    for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan
    {
        offset >>= 1;
        __syncthreads();
        if (threadID < d) {
            int ai = offset * (2 * threadID + 1) - 1;
            int bi = offset * (2 * threadID + 2) - 1;
            int t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();

    if (threadID < n) {
        output[2 * threadID] = temp[2 * threadID]; // write results to device memory
        output[2 * threadID + 1] = temp[2 * threadID + 1];
    }
}

// Per-block prescan for the multi-block path (n = elements per block,
// assumed power of two); each block also emits its total into sums[blockID]
// so a second-level scan can produce per-block increments.  BCAO variant.
__global__ void prescan_large(int *output, int *input, int n, int *sums) {
    extern __shared__ int temp[];

    int blockID = blockIdx.x;
    int threadID = threadIdx.x;
    int blockOffset = blockID * n;

    int ai = threadID;
    int bi = threadID + (n / 2);
    int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    temp[ai + bankOffsetA] = input[blockOffset + ai];
    temp[bi + bankOffsetB] = input[blockOffset + bi];

    int offset = 1;
    for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
    {
        __syncthreads();
        if (threadID < d) {
            int ai = offset * (2 * threadID + 1) - 1;
            int bi = offset * (2 * threadID + 2) - 1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }
    __syncthreads();

    if (threadID == 0) {
        // Save this block's total before clearing it for the down-sweep.
        sums[blockID] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)];
        temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
    }

    for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
    {
        offset >>= 1;
        __syncthreads();
        if (threadID < d) {
            int ai = offset * (2 * threadID + 1) - 1;
            int bi = offset * (2 * threadID + 2) - 1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            int t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();

    output[blockOffset + ai] = temp[ai + bankOffsetA];
    output[blockOffset + bi] = temp[bi + bankOffsetB];
}

// Same as prescan_large but without bank-conflict padding.
__global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums) {
    int blockID = blockIdx.x;
    int threadID = threadIdx.x;
    int blockOffset = blockID * n;

    extern __shared__ int temp[];
    temp[2 * threadID] = input[blockOffset + (2 * threadID)];
    temp[2 * threadID + 1] = input[blockOffset + (2 * threadID) + 1];

    int offset = 1;
    for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
    {
        __syncthreads();
        if (threadID < d) {
            int ai = offset * (2 * threadID + 1) - 1;
            int bi = offset * (2 * threadID + 2) - 1;
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }
    __syncthreads();

    if (threadID == 0) {
        sums[blockID] = temp[n - 1];
        temp[n - 1] = 0;
    }

    for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
    {
        offset >>= 1;
        __syncthreads();
        if (threadID < d) {
            int ai = offset * (2 * threadID + 1) - 1;
            int bi = offset * (2 * threadID + 2) - 1;
            int t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();

    output[blockOffset + (2 * threadID)] = temp[2 * threadID];
    output[blockOffset + (2 * threadID) + 1] = temp[2 * threadID + 1];
}

// Adds one per-block increment to every element of the block's output slice.
__global__ void add(int *output, int length, int *n) {
    int blockID = blockIdx.x;
    int threadID = threadIdx.x;
    int blockOffset = blockID * length;

    output[blockOffset + threadID] += n[blockID];
}

// Adds two per-block increments (used for the uneven-tail fix-up).
__global__ void add(int *output, int length, int *n1, int *n2) {
    int blockID = blockIdx.x;
    int threadID = threadIdx.x;
    int blockOffset = blockID * length;

    output[blockOffset + threadID] += n1[blockID] + n2[blockID];
}

/*///////////////////////////////////*/
/*            utils.cpp              */
/*///////////////////////////////////*/

// Prints the caller, the given message and the CUDA error string, then exits.
void _checkCudaError(const char *message, cudaError_t err, const char *caller) {
    if (err != cudaSuccess) {
        fprintf(stderr, "Error in: %s\n", caller);
        fprintf(stderr, message);
        fprintf(stderr, ": %s\n", cudaGetErrorString(err));
        exit(0);
    }
}

// NOTE(review): divides nanoseconds by 1000 (microseconds) but labels the
// result "ms" — confirm the intended unit.
void printResult(const char* prefix, int result, long nanoseconds) {
    printf(" ");
    printf(prefix);
    printf(" : %i in %ld ms \n", result, nanoseconds / 1000);
}

void printResult(const char* prefix, int result, float milliseconds) {
    printf(" ");
    printf(prefix);
    printf(" : %i in %f ms \n", result, milliseconds);
}

// from https://stackoverflow.com/a/3638454
bool isPowerOfTwo(int x) {
    return x && !(x & (x - 1));
}

// from https://stackoverflow.com/a/12506181
int nextPowerOfTwo(int x) {
    int power = 1;
    while (power < x) {
        power *= 2;
    }
    return power;
}

// from https://stackoverflow.com/a/36095407
// Get the current time in nanoseconds
long get_nanos() {
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (long)ts.tv_sec * 1000000000L + ts.tv_nsec;
}

/*
Timings

'level 1' = blockscan
'l1 bcao' = blockscan with bcao
The number before the time is the final element of the scanned array

20000000 Elements
 host : 89997032 in 42338 ms
 gpu : 89997032 in 16.285631 ms
 gpu bcao : 89997032 in 8.554880 ms
10000000 Elements
 host : 44983528 in 20749 ms
 gpu : 44983528 in 7.860768 ms
 gpu bcao : 44983528 in 4.304064 ms
1000000 Elements
 host : 4494474 in 2105 ms
 gpu : 4494474 in 0.975648 ms
 gpu bcao : 4494474 in 0.600416 ms
10000 Elements
 host : 45078 in 19 ms
 gpu : 45078 in 0.213760 ms
 gpu bcao : 45078 in 0.192128 ms
5000 Elements
 host : 22489 in 11 ms
 gpu : 22489 in 0.169312 ms
 gpu bcao : 22489 in 0.148832 ms
4096 Elements
 host : 18294 in 9 ms
 gpu : 18294 in 0.132672 ms
 gpu bcao : 18294 in 0.128480 ms
2048 Elements
 host : 9149 in 4 ms
 gpu : 9149 in 0.140736 ms
 gpu bcao : 9149 in 0.126944 ms
2000 Elements
 host : 8958 in 3 ms
 gpu : 8958 in 0.178912 ms
 gpu bcao : 8958 in 0.214464 ms
1000 Elements
 host : 4483 in 2 ms
 gpu : 4483 in 0.020128 ms
 gpu bcao : 4483 in 0.010784 ms
 level 1 : 4483 in 0.018080 ms
 l1 bcao : 4483 in 0.010400 ms
500 Elements
 host : 2203 in 4 
ms gpu : 2203 in 0.013440 ms gpu bcao : 2203 in 0.009664 ms level 1 : 2203 in 0.013280 ms l1 bcao : 2203 in 0.010176 ms 100 Elements host : 356 in 0 ms gpu : 356 in 0.008512 ms gpu bcao : 356 in 0.009280 ms level 1 : 356 in 0.008896 ms l1 bcao : 356 in 0.009056 ms 64 Elements host : 221 in 0 ms gpu : 221 in 0.007584 ms gpu bcao : 221 in 0.008960 ms level 1 : 221 in 0.007360 ms l1 bcao : 221 in 0.008352 ms 8 Elements host : 24 in 0 ms gpu : 24 in 0.006240 ms gpu bcao : 24 in 0.007392 ms level 1 : 24 in 0.006176 ms l1 bcao : 24 in 0.007424 ms 5 Elements host : 12 in 0 ms gpu : 12 in 0.006144 ms gpu bcao : 12 in 0.007296 ms level 1 : 12 in 0.006048 ms l1 bcao : 12 in 0.007328 ms */
21,277
#include "includes.h"

// Placeholder for the neighbor-coupling step of the cell model.
// Deliberately a no-op: cellStatePtr / cellVDendPtr are accepted but untouched.
__global__ void neighbor_kernel(double *cellStatePtr, double *cellVDendPtr)
{
    // intentionally empty
}
21,278
#include <stdio.h>

/* Minimal smoke-test program: prints a greeting and exits successfully. */
int main(void)
{
    printf("Hello world!");
    return 0;
}
21,279
/*
 * Copyright 2014 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdio.h>
#include <stdlib.h>

#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif

#define N 1024*1024*64
#define RADIUS 7
#define THREADS_PER_BLOCK 512

/* 1D stencil: out[i] = sum of in[i-RADIUS .. i+RADIUS].
   Boundary elements (within RADIUS of either end) are filled with the value
   the validation loop expects, so the check in main passes for every index. */
__global__ void stencil_1d(int n, double *in, double *out)
{
    /* global index from block and thread built-in variables */
    int gindex = threadIdx.x + blockIdx.x * blockDim.x;
    if (gindex >= n) return;

    /* boundary: synthesize the expected value instead of reading out of range */
    if (gindex < RADIUS || gindex >= (n - RADIUS)) {
        out[gindex] = (double)gindex * ((double)RADIUS * 2 + 1);
        return;
    }

    double result = 0.0;
    for (int i = gindex - RADIUS; i <= gindex + RADIUS; i++)
        result += in[i];
    out[gindex] = result;
}

int main()
{
    double *in, *out;
    double *d_in, *d_out;
    /* FIX: use size_t for the byte count — with N = 2^26 doubles the product
       is 2^29 bytes, one power of two away from overflowing a 32-bit int. */
    size_t size = (size_t)N * sizeof(double);

    CUDA_CALL(cudaMalloc((void **)&d_in, size));
    CUDA_CALL(cudaMalloc((void **)&d_out, size));

    /* FIX: check the 512 MB host allocations instead of assuming success */
    in = (double *)malloc(size);
    out = (double *)malloc(size);
    if (in == NULL || out == NULL) {
        printf("host allocation failed\n");
        return 1;
    }

    for (int i = 0; i < N; i++) {
        in[i] = (double)i;
        out[i] = 0;
    }

    /* copy inputs to device */
    CUDA_CALL(cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemset(d_out, 0, size));

    /* N is an exact multiple of THREADS_PER_BLOCK, so no rounding needed */
    dim3 threads(THREADS_PER_BLOCK, 1, 1);
    dim3 blocks(N / THREADS_PER_BLOCK, 1, 1);

    cudaEvent_t start, stop;
    CUDA_CALL(cudaEventCreate(&start));
    CUDA_CALL(cudaEventCreate(&stop));
    CUDA_CALL(cudaEventRecord(start, 0));

    stencil_1d<<<blocks, threads>>>(N, d_in, d_out);
    CUDA_CHECK();
    CUDA_CALL(cudaDeviceSynchronize());

    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    float elapsedTime;
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("Total time for %d elements was %f ms\n", N, elapsedTime);

    /* copy result back to host and validate */
    CUDA_CALL(cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) {
        if (in[i] * ((double)RADIUS * 2 + 1) != out[i])
            printf("error in element %d in = %f out %f\n", i, in[i], out[i]);
    }

    /* FIX: destroy the timing events (previously leaked) */
    CUDA_CALL(cudaEventDestroy(start));
    CUDA_CALL(cudaEventDestroy(stop));
    free(in);
    free(out);
    CUDA_CALL(cudaFree(d_in));
    CUDA_CALL(cudaFree(d_out));
    CUDA_CALL(cudaDeviceReset());
    return 0;
} /* end main */
21,280
#include "includes.h"

// One thread per random sample: compares d_rand[i] % 2 against a fixed
// 16-flip pattern, recording a loss on the first mismatch and a win after
// 15 consecutive matches.
// NOTE(review): iter[0] is shared mutable state read and written by many
// threads with no atomics — concurrent launches race on it and the outcome
// is nondeterministic. Behavior preserved as-is.
__global__ void pod_racing(unsigned int *d_rand, unsigned int *win, unsigned int *loss, unsigned int size, int *iter)
{
    const unsigned int flips[] = { 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1 };
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid >= size)
        return;

    if ((d_rand[gid] % 2) != flips[iter[0]]) {
        // mismatch: reset the streak and mark this sample as a loss
        iter[0] = 0;
        loss[gid] = 1;
    } else {
        iter[0] = iter[0] + 1;
        if (iter[0] == 15) {
            // 15 consecutive matches: a win; restart the streak
            win[gid] = 1;
            iter[0] = 0;
        }
    }
}
21,281
#include "cuda.h"
#include "stdio.h"
#include "stdlib.h"
// for cuda profiler
#include "cuda_profiler_api.h"

#define M_s 1.f  // Solar mass
#define G 39.5f  // Gravitational constant in solar-mass / AU units

// One component of the gravitational acceleration: G * M * x / r^3.
// (Historically named "potential", but this is the force term used below.)
__device__ float potential_thingy(float x, float y)
{
    return G * M_s * x / powf((powf(x, 2) + powf(y, 2)), 1.5f);
}

// Euler step for one velocity component; grid-stride over the n particles.
// Reads step (current_step - 1), writes step current_step.
__global__ void euler_integration_vx(float *x_out, float *y_out, float *vx_out,
                                     int n, int steps, int current_step, float dt)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < n) {
        vx_out[n * current_step + tid] =
            vx_out[(n * current_step - n) + tid]
            - potential_thingy(x_out[(n * current_step - n) + tid],
                               y_out[(n * current_step - n) + tid]) * dt;
        tid += gridDim.x * blockDim.x;
    }
}

// Euler step for one position component. Consumes the velocity just written
// for current_step, so it must be stream-ordered AFTER euler_integration_vx.
__global__ void euler_integration_x(float *x_out, float *vx_out,
                                    int n, int steps, int current_step, float dt)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < n) {
        x_out[n * current_step + tid] =
            x_out[(n * current_step - n) + tid] + vx_out[n * current_step + tid] * dt;
        tid += gridDim.x * blockDim.x;
    }
}

// Integrates n particles for `steps` Euler steps. The *_out buffers hold the
// full (steps x n) trajectory; row 0 is seeded from x/y/vx/vy.
extern "C" int integrate_euler_cuda(float *x, float *y, float *vx, float *vy,
                                    float *x_out, float *y_out, float *vx_out, float *vy_out,
                                    int n, int steps, float dt)
{
    // dev_** variables live on the CUDA device
    float *dev_x_out, *dev_y_out, *dev_vx_out, *dev_vy_out;

    // BUG FIX: the old code launched the position kernels in streams 2/3 with
    // no dependency on the velocity kernels in streams 0/1 that produce their
    // input — a cross-stream race. One stream per coordinate axis keeps the
    // velocity -> position -> copy chain ordered while x and y still overlap.
    const int nStreams = 2;
    cudaStream_t stream[nStreams];
    for (int i = 0; i < nStreams; ++i)
        cudaStreamCreate(&stream[i]);

    // allocate the trajectory buffers on the GPU
    cudaMalloc((void**)&dev_x_out, steps * n * sizeof(float));
    cudaMalloc((void**)&dev_y_out, steps * n * sizeof(float));
    cudaMalloc((void**)&dev_vx_out, steps * n * sizeof(float));
    cudaMalloc((void**)&dev_vy_out, steps * n * sizeof(float));

    // seed step 0 with the initial conditions
    cudaMemcpy(&dev_x_out[0], &x[0], n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(&dev_y_out[0], &y[0], n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(&dev_vx_out[0], &vx[0], n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(&dev_vy_out[0], &vy[0], n * sizeof(float), cudaMemcpyHostToDevice);

    // time steps are inherently sequential
    int cstep = 1;
    while (cstep < steps) {
        // stream 0: x axis; stream 1: y axis. potential_thingy's first
        // argument is the component being differentiated, hence the swap.
        euler_integration_vx<<<128, 128, 0, stream[0]>>>(dev_x_out, dev_y_out, dev_vx_out, n, steps, cstep, dt);
        euler_integration_vx<<<128, 128, 0, stream[1]>>>(dev_y_out, dev_x_out, dev_vy_out, n, steps, cstep, dt);

        // copy each axis's velocities back as soon as its kernel finishes
        cudaMemcpyAsync(&vx_out[cstep*n], &dev_vx_out[cstep*n], n * sizeof(float), cudaMemcpyDeviceToHost, stream[0]);
        cudaMemcpyAsync(&vy_out[cstep*n], &dev_vy_out[cstep*n], n * sizeof(float), cudaMemcpyDeviceToHost, stream[1]);

        // position kernels in the SAME streams, so they observe the
        // velocities written above
        euler_integration_x<<<128, 128, 0, stream[0]>>>(dev_x_out, dev_vx_out, n, steps, cstep, dt);
        euler_integration_x<<<128, 128, 0, stream[1]>>>(dev_y_out, dev_vy_out, n, steps, cstep, dt);

        cudaMemcpyAsync(&x_out[cstep*n], &dev_x_out[cstep*n], n * sizeof(float), cudaMemcpyDeviceToHost, stream[0]);
        cudaMemcpyAsync(&y_out[cstep*n], &dev_y_out[cstep*n], n * sizeof(float), cudaMemcpyDeviceToHost, stream[1]);

        // next step reads this step's results from BOTH axes
        cudaDeviceSynchronize();
        cstep += 1;
    }

    // FIX: destroy the streams (previously leaked)
    for (int i = 0; i < nStreams; ++i)
        cudaStreamDestroy(stream[i]);

    cudaFree(dev_x_out);
    cudaFree(dev_y_out);
    cudaFree(dev_vx_out);
    cudaFree(dev_vy_out);
    return 0;
}
21,282
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>   /* FIX: INT_MAX was used without this include */

#define N 2000000
#define BLOCK_SIZE 1024

// Per-block minimum reduction: each block writes the minimum of its segment
// of input (first n elements are valid) to output[blockIdx.x].
__global__ void ReduceMin(int n, int *input, int *output){
    __shared__ int sh[BLOCK_SIZE];
    int tid = threadIdx.x;
    int gid = threadIdx.x + blockIdx.x * blockDim.x;

    // BUG FIX: the old guard `tid < BLOCK_SIZE` was always true, so the last
    // block read past the end of input. Pad out-of-range slots with INT_MAX,
    // the identity element for min.
    sh[tid] = (gid < n) ? input[gid] : INT_MAX;
    __syncthreads();

    // classic tree reduction in shared memory
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            sh[tid] = min(sh[tid], sh[tid + s]); // simplified from the atomicMin contortion
        __syncthreads();
    }

    if (tid == 0)
        output[blockIdx.x] = sh[0];
}

int main(){
    // ceil-division also covers the N < BLOCK_SIZE case
    int num_blocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;

    int *h = (int*)malloc(sizeof(int) * N);
    int *d_in, *d_partial;
    cudaMalloc((void **)&d_in, sizeof(int) * N);
    cudaMalloc((void **)&d_partial, sizeof(int) * num_blocks);

    for (int i = 0; i < N; i++)
        h[i] = i + 1;

    cudaMemcpy(d_in, h, sizeof(int) * N, cudaMemcpyHostToDevice);
    // BUG FIX: pass the real element count N (the old call passed BLOCK_SIZE,
    // leaving the kernel's bound meaningless)
    ReduceMin<<<num_blocks, BLOCK_SIZE>>>(N, d_in, d_partial);
    cudaMemcpy(h, d_partial, sizeof(int) * num_blocks, cudaMemcpyDeviceToHost);

    // final reduction of the per-block minima on the host; the old `!= 0`
    // hack is gone because padded blocks now report INT_MAX, never garbage
    int best = INT_MAX;
    for (int i = 0; i < num_blocks; i++)
        if (h[i] < best)
            best = h[i];
    printf("%d", best);

    cudaFree(d_in);
    cudaFree(d_partial);
    free(h);   /* FIX: release host buffer; removed the unused h_temp leak */
}
21,283
#include "../include/encoding.cuh"

// Row-pointer arithmetic over a pitched 2D float buffer:
// element (x, y) of a matrix whose rows are `stride` bytes apart.
__device__ float* get2df(float* p, const int x, int y, const int stride)
{
    return (float*)((char*)p + x * stride) + y;
}

// Encodes each sample as a hypervector: for every feature f, multiply its
// quantized level hypervector by the feature's id hypervector and accumulate.
// Grid: blockIdx.y = sample, (blockIdx.x, threadIdx.x) span the D dimensions.
__global__ void encodeLevelId( float* level_hvs, float* id_hvs, float* feature_matrix, float* hv_matrix, int level_stride, int id_stride, int fm_stride, int N, int F, int Q, int D) {
    const int sample = blockIdx.y;
    if (sample >= N)
        return;
    const int dim = threadIdx.x + blockIdx.x * blockDim.x;
    if (dim >= D)
        return;

    float acc = 0.0;
    #pragma unroll 1
    for (int f = 0; f < F; ++f) {
        // feature value quantized into [0, Q] selects the level hypervector row
        const float v = *get2df(feature_matrix, sample, f, fm_stride);
        acc += *get2df(level_hvs, (int)(v * Q), dim, level_stride)
             * *get2df(id_hvs, f, dim, id_stride);
    }
    // binarize? MAS? (kept from the original — open question)
    hv_matrix[sample * D + dim] = acc;
}

// Accumulates per-class sums of encoded hypervectors into the class weights.
// One thread per hypervector dimension d.
__global__ void updateClassHV(float* hv_matrix, float* weights, int* y_pred, int N, int D) {
    const int d = threadIdx.x + blockIdx.x * blockDim.x;
    if (d >= D)
        return;

    const int MAX_CLASS = 50; // Maximum supported Classes
    float class_sum[MAX_CLASS] = {0, };

    // sum dimension d of every sample into its predicted class's bucket
    for (int ii = 0; ii < N; ++ii)
        class_sum[y_pred[ii]] += hv_matrix[ii * D + d]; // classes 0..K-1

    // NOTE(review): this loop iterates over samples (N) yet writes per-class
    // entries, so each class is rewritten once per sample mapped to it, and
    // the zero-sum fallback copies sample 20's hypervector as a stand-in
    // ("Randomize"). Preserved verbatim — confirm intent with the authors.
    for (int ii = 0; ii < N; ++ii) {
        const int cls = y_pred[ii];
        if (class_sum[cls] == 0)
            weights[cls * D + d] = hv_matrix[20 * D + d]; //Randomize
        else
            weights[cls * D + d] = class_sum[cls];
    }
}
21,284
#include <cuda.h>
#include <device_launch_parameters.h>

// BUG FIX: the macro previously ended with a stray ';', injecting a double
// semicolon at every use site; the statement terminator belongs at the call.
#define PIXEL_COLOR 0xFF585858

extern "C" {
    // Plot parameters, populated from the host (cudaMemcpyToSymbol).
    __constant__ int D_SIZE;      // pixel buffer is D_SIZE x D_SIZE
    __constant__ float D_ALPHA;
    __constant__ float D_BETA;
    //__constant__ float D_SCALE;
    __constant__ float D_XSCALE;  // data-to-pixel scale, x axis
    __constant__ float D_YSCALE;  // data-to-pixel scale, y axis
    __constant__ float D_XMIN;    // data-space origin, x
    __constant__ float D_YMIN;    // data-space origin, y

    // Scatter-plots (xdata, ydata) sample points into the pixel buffer,
    // flipping y so the origin is at the bottom.
    __global__ void RegressionObserverKernel(float *xdata, float *ydata, unsigned int *pixels, int count)
    {
        int threadId = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
        if (threadId < count)
        {
            float xvalue = xdata[threadId];
            int xnew = (int)((xvalue - D_XMIN) * D_XSCALE);
            float yvalue = ydata[threadId];
            int ynew = (int)((yvalue - D_YMIN) * D_YSCALE);
            int pixy = D_SIZE - ynew;
            int idx = pixy * D_SIZE + xnew;
            // NOTE(review): xnew/ynew are not clamped to [0, D_SIZE); a sample
            // outside the configured data range writes out of bounds — confirm
            // the host guarantees the range before relying on this.
            pixels[idx] = PIXEL_COLOR;
        }
    }
}
21,285
// includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>

//-------------Funcion sumar velocidad
// Sums the 9 distribution-function components of each lattice node into
// psum[node]. Grid: x spans nodes, y spans the 9 components.
// BUG FIX: multiple y-threads accumulated into the same psum[x] with a plain
// `+=` — a data race that silently dropped contributions. atomicAdd makes the
// accumulation correct regardless of thread scheduling.
__global__ void densidad_suma_doble_if(float * pdist, float * psum, int node)
{
    int ndist = 9; // numero de funcion de distribucion
    int x = threadIdx.x + blockIdx.x * blockDim.x;  // node index
    int y = threadIdx.y + blockIdx.y * blockDim.y;  // component index
    if (x < node && y < ndist)
        atomicAdd(&psum[x], pdist[x * ndist + y]);
}
// nodo == x
// velocidad == y
21,286
#include "includes.h"

// Marks each index-map pixel valid (1) or invalid (0). With a positive
// validity_halfsize, a pixel is valid only when every texel in the
// surrounding (2h+1)^2 window holds a real surfel index (not 0xFFFFFFFF).
__global__ void markValidIndexMapPixelKernel( cudaTextureObject_t index_map, int validity_halfsize, unsigned img_rows, unsigned img_cols, unsigned char* flatten_validity_indicator ) {
    const int x_center = threadIdx.x + blockDim.x * blockIdx.x;
    const int y_center = threadIdx.y + blockDim.y * blockIdx.y;
    if (x_center >= (int)img_cols || y_center >= (int)img_rows)
        return;
    const int offset = x_center + y_center * img_cols;

    // Fast path: validity depends on this pixel alone
    if (validity_halfsize <= 0) {
        const unsigned surfel_index = tex2D<unsigned>(index_map, x_center, y_center);
        flatten_validity_indicator[offset] = (surfel_index != 0xFFFFFFFF) ? 1 : 0;
        return;
    }

    // Window search (halfsize >= 1).
    // BUG FIX: the loop bounds were previously computed in unsigned
    // arithmetic (auto deduced from threadIdx), so y_center - halfsize
    // underflowed near the border and the scan was silently skipped, leaving
    // border pixels marked valid. Signed ints fix that; out-of-range texel
    // coordinates are resolved by the texture's address mode.
    unsigned char validity = 1;
    for (int y = y_center - validity_halfsize; y <= y_center + validity_halfsize; y++) {
        for (int x = x_center - validity_halfsize; x <= x_center + validity_halfsize; x++) {
            if (tex2D<unsigned>(index_map, x, y) == 0xFFFFFFFF)
                validity = 0;
        }
    }

    // Save it
    flatten_validity_indicator[offset] = validity;
}
21,287
// Cubes an array on the GPU. The original version dereferenced the device
// pointer d_out directly from host code (a segfault, as its own TODO noted);
// results are now copied back with cudaMemcpy before printing.
#include <stdio.h>

// d_out[i] = d_in[i]^3, one thread per element (single block).
__global__ void cube(float *d_out, float *d_in)
{
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f * f * f;
}

int main(int argc, char **argv)
{
    const int ARRAY_SIZE = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    float h_out[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; ++i)
        h_in[i] = float(i);

    // allocate GPU memory
    float *d_in;
    float *d_out;
    cudaMalloc((void**)&d_in, ARRAY_BYTES);
    cudaMalloc((void**)&d_out, ARRAY_BYTES);

    // transfer the input to the GPU and launch
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cube<<<1, ARRAY_SIZE>>>(d_out, d_in);

    // BUG FIX: copy the results back to host memory before printing —
    // device memory cannot be read directly from the CPU.
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    for (int i = 0; i < ARRAY_SIZE; ++i) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    // free GPU memory allocation
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
21,288
/******************************************************************************
 *cr
 *cr            (C) Copyright 2010 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ******************************************************************************/

#define BLOCK_SIZE 512

// Tree reduction: each block sums a 2*BLOCK_SIZE-element segment of `in`
// into out[blockIdx.x]. Elements past `size` are staged as zero so the tail
// block reduces correctly.
__global__ void reduction(float *out, float *in, unsigned size)
{
    __shared__ float partial[2 * BLOCK_SIZE];

    const unsigned tx = threadIdx.x;
    const unsigned base = 2 * blockIdx.x * BLOCK_SIZE;  // segment start

    // Each thread stages two elements, zero-padding past the end of `in`.
    partial[tx] = (base + tx < size) ? in[base + tx] : 0;
    partial[BLOCK_SIZE + tx] =
        (base + BLOCK_SIZE + tx < size) ? in[base + BLOCK_SIZE + tx] : 0;

    // Halve the active range each level; barrier before every level so all
    // writes from the previous level are visible.
    for (unsigned stride = BLOCK_SIZE; stride >= 1; stride >>= 1) {
        __syncthreads();
        if (tx < stride)
            partial[tx] += partial[tx + stride];
    }

    if (tx == 0)
        out[blockIdx.x] = partial[0];
}
21,289
#include <cstdlib>
#include <cstdio>
#include <cmath>   // FIX: fabsf for the float validation below

// Fill an nx*ny matrix with uniform random floats in [0, 1].
void initialize(float* mtx, int const nx, int const ny){
    int total = nx * ny;
    for (int i = 0; i < total; i++)
        mtx[i] = rand() / (float)RAND_MAX;
}

// 2D grid / 2D block element-wise matrix add (launch must tile nx*ny exactly).
__global__ void sumMatrix2D2D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int mp = j * ny + i;
    d_c[mp] = d_a[mp] + d_b[mp];
}

// 1D grid / 1D block variant: grid-strides over rows, block-strides over columns.
__global__ void sumMatrix1D1D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    for (int i = blockIdx.x; i < nx; i += gridDim.x) {
        for (int j = threadIdx.x; j < ny; j += blockDim.x) {
            int mp = i * ny + j;
            d_c[mp] = d_a[mp] + d_b[mp];
        }
    }
}

int main(int argc, char **argv){
    int const nx = 1 << 14;
    int const ny = 1 << 14;
    size_t mSize = nx * ny * sizeof(float);

    float* h_a = (float*)malloc(mSize);
    float* h_b = (float*)malloc(mSize);
    float* h_c = (float*)malloc(mSize);
    initialize(h_a, nx, ny);
    initialize(h_b, nx, ny);

    float* d_a;
    float* d_b;
    float* d_c;
    cudaMalloc((void**)&d_a, mSize);
    cudaMalloc((void**)&d_b, mSize);
    cudaMalloc((void**)&d_c, mSize);
    cudaMemcpy(d_a, h_a, mSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, mSize, cudaMemcpyHostToDevice);

    int xBlock = 32;
    int yBlock = 16;
    if (argc > 1) xBlock = atoi(argv[1]);
    if (argc > 2) yBlock = atoi(argv[2]);
    dim3 block(xBlock, yBlock);
    dim3 grid(nx / xBlock, ny / yBlock);
    printf("run with block %d, %d", xBlock, yBlock);

    sumMatrix2D2D<<<grid, block>>>(d_a, d_b, d_c, nx, ny);
    cudaMemcpy(h_c, d_c, mSize, cudaMemcpyDeviceToHost);
    // BUG FIX: the check used integer abs() on a float difference, truncating
    // every sub-1.0 error to zero and making the validation vacuous; fabsf
    // compares the real magnitude.
    for (int i = 0; i < nx * ny; i++){
        if (fabsf(h_c[i] - (h_a[i] + h_b[i])) > 1e-4f) {
            printf("2D2D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }

    sumMatrix1D1D<<<128, 128>>>(d_a, d_b, d_c, nx, ny);
    cudaMemcpy(h_c, d_c, mSize, cudaMemcpyDeviceToHost);
    for (int i = 0; i < nx * ny; i++){
        if (fabsf(h_c[i] - (h_a[i] + h_b[i])) > 1e-4f) {
            printf("1D1D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }

    // FIX: release device and host buffers (previously leaked)
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(h_a); free(h_b); free(h_c);
    return 0;
}
21,290
#include <cuda.h>
#include <vector>
#include <cstdio>
#include <cstdlib>

// Fixed-capacity device queue: insert() reserves a slot with atomicAdd.
// NOTE(review): `size` keeps growing past capacity under overflow; treat any
// size > capacity as "full" when inspecting from the host.
template <typename T, std::size_t capacity>
struct queue {
    int size = 0;
    T data[capacity];

    __device__ bool insert(const T& value) {
        int slot = atomicAdd(&size, 1);
        if (slot >= (int)capacity) {
            // Queue is overflowing. Drop the value.
            return false;
        }
        data[slot] = value;
        return true;
    }
};

constexpr size_t queue_size = 1000;

// Every thread with tid < n pushes its id into all n queues.
__global__ void kernel(queue<int, queue_size>* queues, int n) {
    size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < n) {
        for (size_t i = 0; i < n; i++) {
            queues[i].insert(tid);
        }
    }
}

int main(int argc, char** argv) {
    constexpr size_t n = queue_size;
    std::vector<queue<int, n>> queues(n);

    queue<int, n>* d_queues;
    cudaMalloc(&d_queues, sizeof(queue<int, n>) * n);
    cudaMemcpy(d_queues, queues.data(), sizeof(queue<int, n>) * n, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();

    size_t block_size = 256;
    // ceil(grid_size / block_size)
    dim3 grid((n + block_size - 1) / block_size);
    dim3 block(block_size);
    kernel<<<grid, block>>>(d_queues, n);

    cudaMemcpy(queues.data(), d_queues, sizeof(queue<int, n>) * n, cudaMemcpyDeviceToHost);

    // Validate: every queue must hold a permutation of 0..n-1.
    bool success = true;
    for (size_t i = 0; i < n; i++) {
        queue<int, n>& q = queues[i];
        if (q.size != n) {
            success = false;
            break;
        }
        std::vector<size_t> histogram(n, 0);
        // FIX: inner loop no longer shadows the outer index
        for (size_t j = 0; j < n; j++) {
            // BUG FIX: the range check was `< 0 && >= n`, which can never be
            // true; it must be `||` to reject out-of-range values.
            if (q.data[j] < 0 || q.data[j] >= (int)n) {
                success = false;
                break;
            }
            histogram[q.data[j]] += 1;
            if (histogram[q.data[j]] != 1) {  // duplicate value
                success = false;
                break;
            }
        }
    }

    if (success) {
        printf("OK!\n");
    } else {
        printf("FAILED.\n");
    }

    cudaDeviceSynchronize();
    cudaFree(d_queues);
    return 0;
}
21,291
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/timeb.h>

// C = A * B for square N x N matrices in flat row-major arrays:
// c[n*N + m] = sum_k a[n*N + k] * b[k*N + m].
void Multiplication(float *__restrict__ c, float * a, float * b, int N)
{
    #pragma acc parallel loop present(c, a, b)
    for (int n = 0; n < N; n++) {
        for (int m = 0; m < N; m++) {
            float sum = 0.f;
            for (int k = 0; k < N; k++) {
                sum += a[k + n * N] * b[k * N + m];
            }
            c[m + n * N] = sum;
        }
    }
}

int main()
{
    int i;
    const int N = 4;

    // BUG FIX: row buffers were allocated with sizeof(float*) instead of
    // sizeof(float) — a silent over-allocation on 64-bit platforms.
    float **Matrix_A = (float**)malloc(N * sizeof(float*));
    float **Matrix_B = (float**)malloc(N * sizeof(float*));
    float **Matrix_C = (float**)malloc(N * sizeof(float*));
    for (i = 0; i < N; i++) {
        Matrix_A[i] = (float*)malloc(N * sizeof(float));
        Matrix_B[i] = (float*)malloc(N * sizeof(float));
        Matrix_C[i] = (float*)malloc(N * sizeof(float));
    }

    float *a = (float *)malloc(N * N * sizeof(float));
    float *b = (float *)malloc(N * N * sizeof(float));
    float *c = (float *)malloc(N * N * sizeof(float));

    srand(time(NULL));
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; j++) {
            Matrix_A[i][j] = rand() % 5;
            Matrix_B[i][j] = rand() % 10;
        }
    }

    // flatten to row-major 1D arrays for the accelerated routine
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; j++) {
            a[j + i * N] = Matrix_A[i][j];
            b[j + i * N] = Matrix_B[i][j];
        }
    }

    #pragma acc data copyin (a[0:N*N], b[0:N*N]) copyout (c[0:N*N])
    {
        Multiplication(c, a, b, N);
    }

    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%3.0f ", Matrix_A[i][j]);
        }
        printf("\n");
    }
    printf("\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%3.0f ", Matrix_B[i][j]);
        }
        printf("\n");
    }
    printf("\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            Matrix_C[i][j] = c[j + i * N];
            printf("%3.0f ", Matrix_C[i][j]);
        }
        printf("\n");
    }

    // BUG FIX: free the row buffers before the row-pointer arrays
    // (previously only the pointer arrays were freed — a leak).
    for (i = 0; i < N; i++) {
        free(Matrix_A[i]);
        free(Matrix_B[i]);
        free(Matrix_C[i]);
    }
    free(a);
    free(b);
    free(c);
    free(Matrix_A);
    free(Matrix_B);
    free(Matrix_C);
}
21,292
#include <stdio.h>
#include <stdlib.h>  /* FIX: heap allocation for the 4 MB host arrays */
#include <math.h>

#define N 1000000

// Element-wise vector add: y[i] = x[i] + y[i], block-striding over n
// elements with a single block.
__global__ void add(int n, float *x, float *y)
{
    int index = threadIdx.x;
    int stride = blockDim.x;
    // BUG FIX: the loop bound was `i <= n`, reading and writing one element
    // past the end of both device arrays.
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

int main(void)
{
    // BUG FIX: the host arrays were stack-allocated (2 x 4 MB — overflows the
    // typical 8 MB default stack); allocate them on the heap instead.
    float *x = (float *)malloc(N * sizeof(float));
    float *y = (float *)malloc(N * sizeof(float));
    if (x == NULL || y == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    float *d_x, *d_y;
    cudaMalloc(&d_x, N * sizeof(float));
    cudaMalloc(&d_y, N * sizeof(float));
    cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);

    // Run kernel on the elements on the GPU
    add<<<1, 256>>>(N, d_x, d_y);

    cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_x);
    cudaFree(d_y);

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    printf("Max error: %f\n", maxError);

    free(x);
    free(y);
}
21,293
#include "includes.h"

const int Nthreads = 1024, maxFR = 5000, NrankMax = 6;

//////////////////////////////////////////////////////////////////////////////////////////
// Accumulates amplitude-scaled template waveforms into the snippet buffer WU.
// Grid: one block per template id (blockIdx.x); threadIdx.x indexes the time
// sample, threadIdx.y strides over the near channels of the spike's cluster.
// NOTE(review): Params layout assumed from the indices used here —
// [4]=nt0, [9]=Nchan, [10]=NchanNear, [13]=Nsum. Confirm against the caller.
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void extract_snips2(const double *Params, const float *err, const int *st, const int *id, const int *counter, const int *kk, const int *iC, const float *W, float *WU){
    const int nt0       = (int) Params[4];
    const int Nchan     = (int) Params[9];
    const int NchanNear = (int) Params[10];
    const int Nsum      = (int) Params[13];

    const int tidx = threadIdx.x;              // time sample within the snippet
    const int bid  = blockIdx.x;               // template handled by this block
    const int Nmax = min(maxFR, counter[1]);   // number of spikes to scan

    for (int ind = 0; ind < Nmax; ind++) {
        if (id[ind] != bid)
            continue;                          // spike belongs to another template

        const int icl  = kk[st[ind]];          // cluster code for this spike
        const int nsub = 1 + icl % Nsum;       // sub-templates summed in this cluster
        // scale kept in double precision, exactly as the original expression
        const double amp = sqrt(err[st[ind]] / (1. + icl % Nsum));

        for (int tidy = threadIdx.y; tidy < nsub; tidy += blockDim.y) {
            WU[tidx + iC[tidy + bid * NchanNear] * nt0 + nt0 * Nchan * ind] =
                amp * W[tidx + nt0 * int(icl / Nsum)];
        }
    }
}
21,294
#include <stdio.h>
#include <float.h>   /* FIX: FLT_MAX for the identity-element padding below */

// Single-block tree reduction. op: 0 = min, 1 = max, otherwise sum.
// Shared memory: blockDim.x floats (third launch parameter).
__global__ void reduce_kernel(float * d_out, const float * d_in, int n, int op)
{
    // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
    extern __shared__ float sdata[];
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;

    // BUG FIX: threads past n used to `return` before __syncthreads(), which
    // is undefined behaviour for a barrier. Instead every thread stores
    // something: real data, or the identity element of the requested op, so
    // all threads reach every barrier and padded slots cannot affect results.
    if (gid < n)
        sdata[tid] = d_in[gid];
    else
        sdata[tid] = (op == 0) ? FLT_MAX : (op == 1) ? -FLT_MAX : 0.0f;
    __syncthreads(); // make sure entire block is loaded!

    // do reduction in shared mem
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            if (op == 0)
                sdata[tid] = min(sdata[tid], sdata[tid + s]);
            else if (op == 1)
                sdata[tid] = max(sdata[tid], sdata[tid + s]);
            else
                sdata[tid] += sdata[tid + s];
        }
        __syncthreads(); // make sure all ops at one stage are done!
    }

    // only thread 0 writes the block result back to global mem
    if (tid == 0)
        d_out[blockIdx.x] = sdata[0];
}

// Debug helper: print a min/max pair from device memory.
__global__ void print_vals2(float *d_min, float * d_max){
    printf("Naive %f %f\n", *d_min, *d_max);
}

// Debug helper: dump an array from device memory.
__global__ void print_arr(float *arr, int N){
    for (int i = 0; i < N; ++i)
        printf("%d %f ", i, arr[i]);
    printf("\n");
}

// Next power of two >= v (32-bit bit-twiddling, from stackoverflow.com/a/3638454 family).
__host__ __device__ unsigned int round2power(unsigned int v){
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}

// Reduces d_in[0..N) into h_reduce. h_op: 0 min, 1 max, else sum.
// Launches a single block of round2power(N) threads, so N must be <= 1024.
void reduce(const float* const d_in, float & h_reduce, int h_op, const size_t N){
    float *d_reduce;
    cudaMalloc(&d_reduce, sizeof(float));
    unsigned int Nblock = 1;
    unsigned int Nthread = round2power(N); // blockDim must be a power of 2
    cudaMemcpy(d_reduce, &h_reduce, sizeof(float), cudaMemcpyHostToDevice);
    reduce_kernel<<<Nblock, Nthread, Nthread * sizeof(float)>>>(d_reduce, d_in, N, h_op);
    cudaMemcpy(&h_reduce, d_reduce, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_reduce);
}

int main(){
    const int N = 300; // Need to be smaller than 1024 (single-block reduction)
    int sizeN = N * sizeof(float);
    float * h_input = new float[N];
    float *d_input;
    float h_min = 99999.0f, h_max = -99999.0f;

    for (int i = 0; i < N; ++i) {
        h_input[i] = (rand()) % N / 10.0f;
    }

    cudaMalloc(&d_input, sizeN);
    cudaMemcpy(d_input, h_input, sizeN, cudaMemcpyHostToDevice);

    reduce(d_input, h_min, 0, N);
    reduce(d_input, h_max, 1, N);
    printf("%f %f \n", h_min, h_max);

    cudaFree(d_input);
    delete [] h_input;
}
21,295
#include <vector>
#include <algorithm>
#include <cstdlib>
#include <cstdio>
#include <time.h>
#include <cassert>

#define ITERATIONS 1
#define FINDS 10000
#define M 1046527
#define LINEAR 1
#define BINARY 2

using namespace std;

// Benchmark: fill a vector with N random ints, then time FINDS membership
// queries using either a linear scan (method 1) or sort + binary search
// (method 2). Usage: prog [N] [method]
int main(int argc, char ** argv){
    long total_time = 0;
    struct timespec start, stop;
    std::vector<int> vec;
    unsigned long N = 32000;
    int method = 1;

    if (argc > 1) {
        // Parse as signed first: the original assigned atol() straight into an
        // unsigned long, so a negative argument wrapped to a huge value and
        // slipped past the N <= 0 check.
        long requested = atol(argv[1]);
        if (requested <= 0) {
            printf("N must be greater than 0\n");
            return 1;
        }
        N = (unsigned long) requested;
    }
    if (argc > 2) {
        method = atoi(argv[2]);
        if (method != LINEAR && method != BINARY) {
            printf("Search method parameters may be 1 (Linear) or 2 (Binary).\n");
            return 1;
        }
    }

    vec.resize(N);
    srand(100); // fixed seed for reproducible runs

    unsigned long numFound = 0;
    for (int i = 0; i < ITERATIONS; i++) {
        // unsigned counter: the original `int c` compared against an unsigned
        // long N, and would overflow for N > INT_MAX
        for (unsigned long c = 0; c < N; c++) {
            vec[c] = rand() % M;
        }

        clock_gettime(CLOCK_REALTIME, &start);
        if (method == LINEAR) {
            for (int c = 0; c < FINDS; c++) {
                if (find(vec.begin(), vec.end(), rand() % M) != vec.end()) {
                    ++numFound;
                }
            }
        } else {
            sort(vec.begin(), vec.end());
            for (int c = 0; c < FINDS; c++) {
                if (binary_search(vec.begin(), vec.end(), rand() % M)) {
                    ++numFound;
                }
            }
        }
        clock_gettime(CLOCK_REALTIME, &stop);

        // L suffix keeps the multiplication in long arithmetic
        total_time += ((stop.tv_sec - start.tv_sec) * 1000000000L)
                    + (stop.tv_nsec - start.tv_nsec);
    }

    // %ld: total_time is a signed long (the original used %lu)
    printf("%ld ns\n", total_time);
    printf("Found: %lu\n", numFound);
    return 0;
}
21,296
#include <stdio.h>
#include <math.h>

// Element-wise C = A + B for two N x N matrices, one thread per element.
// Expects a 2D grid of 2D blocks covering at least N x N threads; threads
// outside the matrix are masked out by the bounds check.
__global__ void MatAdd(const float *A, const float *B, float *C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;

    if (i < N && j < N)
    {
        int indx = i + j * N;
        C[indx] = A[indx] + B[indx];
    }
}

/**
 * Host main routine
 */
int main(void)
{
    // Print the matrix size to be used, and compute its size in bytes
    int N = 100;
    size_t size = N * N * sizeof(float);
    printf("[Matrix size of %dx%d elements]\n", N, N);

    // allocate host memory
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);

    // Initialize the host input matrices with ones so the expected sum is 2*N*N.
    for (int i = 0; i < N; ++i)
    {
        for (int j = 0; j < N; ++j)
        {
            h_A[i + j * N] = 1; //rand()/(float)RAND_MAX;
            // BUG FIX: the original read `h_B[...] = 1; rand()/(float)RAND_MAX;`
            // leaving a stray rand() expression-statement that advanced the RNG
            // for no reason — the call was clearly meant to be commented out
            // like the one above.
            h_B[i + j * N] = 1; //rand()/(float)RAND_MAX;
        }
    }

    // ALLOCATE DEVICE MEMORY
    float *d_A = NULL;
    cudaMalloc((void **)&d_A, size);
    float *d_B = NULL;
    cudaMalloc((void **)&d_B, size);
    float *d_C = NULL;
    cudaMalloc((void **)&d_C, size);

    // Copy the host input vectors A and B in host memory to the device input
    // vectors in device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    // Launch the matrix-add CUDA kernel: 32x32 threads per block, and one
    // extra block per axis to cover any remainder of N.
    dim3 B(32, 32); // B.x=32, B.y=32, B.z=1
    dim3 G(N / 32 + 1, N / 32 + 1);
    // BUG FIX: the original printf passed B (threads per block) into the
    // "blocks" fields and G (grid of blocks) into the "threads" fields, so the
    // message misreported the launch configuration.
    printf("CUDA kernel launch with %dx%dx%d blocks of %dx%dx%d threads\n",
           G.x, G.y, G.z, B.x, B.y, B.z);
    MatAdd<<<G, B>>>(d_A, d_B, d_C, N);

    // Copy the device result vector in device memory to the host result vector
    // in host memory (blocking copy, so it also synchronizes with the kernel).
    printf("Copy output data from the CUDA device to the host memory\n");
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    // verify the result: every element should be 2, so the total is 2*N*N
    float sum = 0;
    for (int i = 0; i < N; ++i)
    {
        for (int j = 0; j < N; ++j)
        {
            sum += h_C[i + j * N];
        }
    }
    if (fabs(sum - N * N * 2) < 1.0e-5)
    {
        printf("Test PASSED\n");
    }
    else
    {
        printf("Test Don't pass: you fucked up!\n");
    }

    // Free device global memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);

    printf("Done\n");
    return 0;
}
21,297
#include "includes.h"

// Divides a single (x, y) position by its maximum extents, writing the scaled
// pair into normalized[0..1]. Exactly one thread in the whole grid — the one
// with global index 0 — performs the write; every other thread exits at once.
__global__ void NormalizePositionKernel( float *input, float *normalized, float xMax, float yMax )
{
    // Flatten the (2D grid, 1D block) launch into one global thread index.
    int rowOffset   = blockDim.x * blockIdx.y * gridDim.x; // threads in grid rows above this one
    int blockOffset = blockDim.x * blockIdx.x;             // threads in earlier blocks of this row
    int globalIdx   = rowOffset + blockOffset + threadIdx.x;

    if (globalIdx != 0)
    {
        return;
    }

    normalized[0] = input[0] / xMax;
    normalized[1] = input[1] / yMax;
}
21,298
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cuda.h>
#include <cuda_runtime.h>

namespace nvinfer1
{
namespace plugin
{
// Crops each box region out of its source image and bilinearly resizes it to
// crop_height x crop_width (TensorFlow-style crop_and_resize).
//
// Layouts (established by the index arithmetic below):
//   - output (crops_ptr): [batch * num_boxes, depth, crop_height, crop_width],
//     flattened; out_idx decomposes as x (fastest), y, d, b.
//   - image_ptr: [batch, depth, image_height, image_width] (NCHW).
//   - boxes_ptr: 4 floats per box in (y1, x1, y2, x2) order; the coordinates
//     are scaled by (image_dim - 1), i.e. they appear to be normalized to
//     [0, 1] — consistent with the TF operator this mirrors.
//
// Grid-stride loop over nthreads output elements, so any launch configuration
// covers the whole output. Samples falling outside the image are filled with
// extrapolation_value.
template <typename T>
__global__ void cropAndResizeKernel(const int nthreads, const T* image_ptr, const float* boxes_ptr,
    int num_boxes, int batch, int image_height, int image_width, int crop_height, int crop_width, int depth,
    float extrapolation_value, float* crops_ptr)
{
    for (int out_idx = threadIdx.x + blockIdx.x * blockDim.x; out_idx < nthreads; out_idx += blockDim.x * gridDim.x)
    {
        // Decompose the flat output index into (x, y, d, b).
        int idx = out_idx;
        const int x = idx % crop_width;
        idx /= crop_width;
        const int y = idx % crop_height;
        idx /= crop_height;
        const int d = idx % depth;
        const int b = idx / depth;

        // Box corners for this crop.
        const float y1 = boxes_ptr[b * 4];
        const float x1 = boxes_ptr[b * 4 + 1];
        const float y2 = boxes_ptr[b * 4 + 2];
        const float x2 = boxes_ptr[b * 4 + 3];

        //each image has num_boxes of boxes, so we simply divide to get the box index.
        const int b_in = b / num_boxes;

        if (b_in < 0 || b_in >= batch)
        {
            continue;
        }

        // Step size in source pixels per output pixel; degenerate 1-pixel
        // crops sample the box center instead (the 0.5*(y1+y2) branch below).
        const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0;
        const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0;

        const float in_y = (crop_height > 1) ? y1 * (image_height - 1) + y * height_scale
                                             : 0.5 * (y1 + y2) * (image_height - 1);

        // Outside the image vertically: emit the fill value.
        if (in_y < 0 || in_y > image_height - 1)
        {
            crops_ptr[out_idx] = extrapolation_value;
            continue;
        }

        const float in_x = (crop_width > 1) ? x1 * (image_width - 1) + x * width_scale
                                            : 0.5 * (x1 + x2) * (image_width - 1);

        // Outside the image horizontally: emit the fill value.
        if (in_x < 0 || in_x > image_width - 1)
        {
            crops_ptr[out_idx] = extrapolation_value;
            continue;
        }

        // Bilinear interpolation between the four neighbouring source pixels.
        const int top_y_index = floorf(in_y);
        const int bottom_y_index = ceilf(in_y);
        const float y_lerp = in_y - top_y_index;
        const int left_x_index = floorf(in_x);
        const int right_x_index = ceilf(in_x);
        const float x_lerp = in_x - left_x_index;

        const float top_left(static_cast<float>(
            image_ptr[((b_in * depth + d) * image_height + top_y_index) * image_width + left_x_index]));
        const float top_right(static_cast<float>(
            image_ptr[((b_in * depth + d) * image_height + top_y_index) * image_width + right_x_index]));
        const float bottom_left(static_cast<float>(
            image_ptr[((b_in * depth + d) * image_height + bottom_y_index) * image_width + left_x_index]));
        const float bottom_right(static_cast<float>(
            image_ptr[((b_in * depth + d) * image_height + bottom_y_index) * image_width + right_x_index]));

        const float top = top_left + (top_right - top_left) * x_lerp;
        const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
        crops_ptr[out_idx] = top + (bottom - top) * y_lerp;
    }
}

// Host launcher: computes the output volume, launches cropAndResizeKernel on
// the given stream with 1024-thread blocks, and returns 0.
// NOTE(review): the `n` parameter is unused here — output_volume is recomputed
// from the shape arguments; presumably kept for interface compatibility with
// the plugin framework — confirm against callers.
int cropAndResizeInference(cudaStream_t stream, int n, const void* image, const void* rois, int batch_size,
    int input_height, int input_width, int num_boxes, int crop_height, int crop_width, int depth, void* output)
{
    int output_volume = batch_size * num_boxes * crop_height * crop_width * depth;
    int block_size = 1024;
    // ceil-div so every output element is covered
    int grid_size = (output_volume + block_size - 1) / block_size;
    cropAndResizeKernel<float><<<grid_size, block_size, 0, stream>>>(output_volume,
        static_cast<const float*>(image), static_cast<const float*>(rois), num_boxes, batch_size, input_height,
        input_width, crop_height, crop_width, depth, 0.0f, static_cast<float*>(output));
    return 0;
}

} // namespace plugin
} // namespace nvinfer1
21,299
#include "includes.h"

// One step of a linear congruential generator (glibc-style constants),
// masked down to 31 bits so the result is non-negative as a signed value.
__device__ unsigned int Rand(unsigned int randx)
{
    unsigned int next = randx * 1103515245 + 12345;
    return next & 2147483647;
}

// Fills gpu_array[0..N) with pseudo-random floats derived from each element's
// own global index, scaled by 1/maxval. Expects a 2D grid of 1D blocks.
__global__ void setRandom(float *gpu_array, int N, int maxval )
{
    int flatBlock = blockIdx.x + blockIdx.y * gridDim.x;     // flat block index in the 2D grid
    int id        = flatBlock * blockDim.x + threadIdx.x;    // flat global thread index

    if (id >= N)
    {
        return;
    }

    gpu_array[id] = 1.0f / maxval * Rand(id) / float( RAND_MAX );
}
21,300
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>

// Tile edge for the tiled kernels. BLOCK_SIZE * BLOCK_SIZE must not exceed
// the CUDA limit of 1024 threads per block: the original value of 50 meant
// 2500 threads per block, so every kernel launch failed — silently, since no
// launch is error-checked — and the GPU paths computed nothing. 25 (625
// threads) stays under the limit and still evenly divides every tested size
// (100..1000 in steps of 100), which MatMulKernelShared requires.
#define BLOCK_SIZE 25

// A test comparing the efficiency of matrix multiplication on the CPU versus
// the GPU, using shared or global memory.

// Row-major matrix. stride is the row pitch in elements; the shared-memory
// kernel uses it to address sub-matrix views of a larger matrix.
typedef struct {
    int width;
    int height;
    int stride;
    float *elements;
} Matrix;

// Seeds rand() from the clock, then re-seeds from a first draw.
void startSeed()
{
    srand(time(NULL));
    int seed = rand();
    srand(seed);
}

// Fills mat with random integer values in [0, 9].
void draw_random(Matrix mat)
{
    for (int i = 0; i < mat.height * mat.width; i++) {
        mat.elements[i] = (float) (rand() % 10);
    }
}

// Prints mat row by row (debug helper).
void disp_img(Matrix mat)
{
    for (int i = 0; i < mat.height; i++) {
        for (int j = 0; j < mat.width; j++) {
            printf("%5.0f", mat.elements[i * mat.width + j]);
        }
        printf("\n");
    }
    printf("\n");
}

// Allocates a zero-initialised height x width matrix on the host.
Matrix createMatrix(int height, int width)
{
    Matrix mat;
    mat.width = width;
    mat.height = height;
    mat.elements = (float*) malloc(mat.width * mat.height * sizeof(float));
    for (int i = 0; i < mat.height; i++)
        for (int j = 0; j < mat.width; j++)
            mat.elements[i * mat.width + j] = 0;
    return mat;
}

// Releases the host storage behind a matrix from createMatrix(). The original
// program leaked every matrix it created (three per size per device, with C
// reallocated twice more on top of that).
void freeMatrix(Matrix mat)
{
    free(mat.elements);
}

// Reference CPU implementation of C = A * B.
void multiMatrixCPU(Matrix A, Matrix B, Matrix C)
{
    for (int i = 0; i < A.height; i++) {
        for (int j = 0; j < B.width; j++) {
            // consistently index with C.width (equals B.width by construction;
            // the original mixed B.width and C.width on the same element)
            C.elements[j + i * C.width] = 0;
            for (int k = 0; k < A.width; k++) {
                C.elements[j + i * C.width] += A.elements[k + i * A.width] * B.elements[j + k * B.width];
            }
        }
    }
}

// Reads A[row][col] through the stride (valid for sub-matrix views).
__device__ float GetElement(const Matrix A, int row, int col)
{
    return A.elements[row * A.stride + col];
}

// Writes A[row][col] through the stride (valid for sub-matrix views).
__device__ void SetElement(Matrix A, int row, int col, float value)
{
    A.elements[row * A.stride + col] = value;
}

// Returns a BLOCK_SIZE x BLOCK_SIZE view into A at tile (row, col); no data
// is copied, only the base pointer and stride are adjusted.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix Asub;
    Asub.width = BLOCK_SIZE;
    Asub.height = BLOCK_SIZE;
    Asub.stride = A.stride;
    Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
    return Asub;
}

// Tiled C = A * B using shared memory. Requires all dimensions to be exact
// multiples of BLOCK_SIZE and a BLOCK_SIZE x BLOCK_SIZE thread block.
__global__ void MatMulKernelShared(Matrix A, Matrix B, Matrix C)
{
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);

    float Cvalue = 0;
    int row = threadIdx.y;
    int col = threadIdx.x;

    // March the A tile right and the B tile down, accumulating one product
    // per tile pair.
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        Matrix Bsub = GetSubMatrix(B, m, blockCol);

        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);
        __syncthreads(); // tiles fully loaded before anyone reads them

        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];
        __syncthreads(); // finish reading before the next iteration overwrites
    }
    SetElement(Csub, row, col, Cvalue);
}

// Host wrapper for the shared-memory kernel: copies A and B to the device,
// launches MatMulKernelShared, copies C back, and frees device memory.
// Dimensions must be multiples of BLOCK_SIZE (the grid division truncates).
void MatMulShared(const Matrix A, const Matrix B, Matrix C)
{
    Matrix d_A;
    d_A.width = d_A.stride = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);

    Matrix d_B;
    d_B.width = d_B.stride = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

    Matrix d_C;
    d_C.width = d_C.stride = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);

    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernelShared<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);

    // blocking copy also synchronizes with the kernel
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);

    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Naive global-memory kernel: each thread computes one element of C by
// accumulating results into Cvalue.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    float Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < A.height && col < B.width) {
        for (int e = 0; e < A.width; e++)
            Cvalue += (A.elements[row * A.width + e]) * (B.elements[e * B.width + col]);
        C.elements[row * C.width + col] = Cvalue;
    }
}

// Host wrapper for the global-memory kernel: copies A and B to the device,
// launches MatMulKernel with a ceil-div grid, copies C back, frees memory.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);

    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);

    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
                 (A.height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // modern equivalent
    cudaDeviceSynchronize();

    // Read C from device memory
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// For each CUDA device and each square size 100..1000, times the global-memory
// GPU kernel, the shared-memory GPU kernel, and the CPU reference.
int main(int argc, char* argv[])
{
    clock_t tic;
    clock_t toc;
    Matrix A;
    Matrix B;
    Matrix C;
    int a;
    int b;
    int c;

    startSeed();

    int num_devices, device;
    printf("Multiplicação de Matrizes\n");
    // report the real tile size instead of the stale hard-coded "50"
    printf("Neste programa foi utilizado um BLOCK_SIZE = %d\n\n", BLOCK_SIZE);
    cudaGetDeviceCount(&num_devices);
    for (device = 0; device < num_devices; device++) {
        cudaDeviceProp properties;
        cudaGetDeviceProperties(&properties, device);
        cudaSetDevice(device);
        printf("Utilizando uma %s:\n\n", properties.name);

        for (int i = 100; i <= 1000; i += 100) {
            a = i;
            b = i;
            c = i;
            A = createMatrix(a, b);
            B = createMatrix(b, c);
            C = createMatrix(A.height, B.width);
            printf("A[%d][%d] * B[%d][%d]\n", a, b, b, c);

            draw_random(A);
            draw_random(B);
            tic = clock();
            MatMul(A, B, C);
            toc = clock();
            printf("GPU (global): %.3fms\n", (double)(toc - tic) / CLOCKS_PER_SEC * 1000);

            freeMatrix(C); // the original leaked C before each re-creation
            C = createMatrix(A.height, B.width);
            draw_random(A);
            draw_random(B);
            tic = clock();
            MatMulShared(A, B, C);
            toc = clock();
            printf("GPU (shared): %.3fms\n", (double)(toc - tic) / CLOCKS_PER_SEC * 1000);

            freeMatrix(C);
            C = createMatrix(A.height, B.width);
            draw_random(A);
            draw_random(B);
            tic = clock();
            multiMatrixCPU(A, B, C);
            toc = clock();
            printf("CPU: %.3fms\n", (double)(toc - tic) / CLOCKS_PER_SEC * 1000);
            printf("\n");

            // release this iteration's matrices (all leaked in the original)
            freeMatrix(A);
            freeMatrix(B);
            freeMatrix(C);
        }
    }
}